]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
ipsec: ipcomp - Decompress into frags if necessary
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
f1410647 3 * Copyright (c) 2007-2008 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
a2fbb9ea
ET
43#endif
44#include <net/ip.h>
45#include <net/tcp.h>
46#include <net/checksum.h>
34f80b04
EG
47#include <linux/version.h>
48#include <net/ip6_checksum.h>
a2fbb9ea
ET
49#include <linux/workqueue.h>
50#include <linux/crc32.h>
34f80b04 51#include <linux/crc32c.h>
a2fbb9ea
ET
52#include <linux/prefetch.h>
53#include <linux/zlib.h>
a2fbb9ea
ET
54#include <linux/io.h>
55
56#include "bnx2x_reg.h"
57#include "bnx2x_fw_defs.h"
58#include "bnx2x_hsi.h"
c18487ee 59#include "bnx2x_link.h"
a2fbb9ea
ET
60#include "bnx2x.h"
61#include "bnx2x_init.h"
62
e35c3269
EG
63#define DRV_MODULE_VERSION "1.45.6"
64#define DRV_MODULE_RELDATE "2008/06/23"
34f80b04 65#define BNX2X_BC_VER 0x040200
a2fbb9ea 66
34f80b04
EG
67/* Time in jiffies before concluding the transmitter is hung */
68#define TX_TIMEOUT (5*HZ)
a2fbb9ea 69
53a10565 70static char version[] __devinitdata =
34f80b04 71 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
72 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
73
24e3fcef 74MODULE_AUTHOR("Eliezer Tamir");
a2fbb9ea
ET
75MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76MODULE_LICENSE("GPL");
77MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea
ET
78
79static int use_inta;
80static int poll;
a2fbb9ea 81static int debug;
7a9b2557 82static int disable_tpa;
34f80b04
EG
83static int nomcp;
84static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea
ET
85static int use_multi;
86
87module_param(use_inta, int, 0);
88module_param(poll, int, 0);
a2fbb9ea 89module_param(debug, int, 0);
7a9b2557 90module_param(disable_tpa, int, 0);
34f80b04 91module_param(nomcp, int, 0);
a2fbb9ea
ET
92MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
93MODULE_PARM_DESC(poll, "use polling (for debug)");
c14423fe 94MODULE_PARM_DESC(debug, "default debug msglevel");
34f80b04 95MODULE_PARM_DESC(nomcp, "ignore management CPU");
a2fbb9ea
ET
96
97#ifdef BNX2X_MULTI
98module_param(use_multi, int, 0);
99MODULE_PARM_DESC(use_multi, "use per-CPU queues");
100#endif
101
102enum bnx2x_board_type {
103 BCM57710 = 0,
34f80b04
EG
104 BCM57711 = 1,
105 BCM57711E = 2,
a2fbb9ea
ET
106};
107
34f80b04 108/* indexed by board_type, above */
53a10565 109static struct {
a2fbb9ea
ET
110 char *name;
111} board_info[] __devinitdata = {
34f80b04
EG
112 { "Broadcom NetXtreme II BCM57710 XGb" },
113 { "Broadcom NetXtreme II BCM57711 XGb" },
114 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
115};
116
34f80b04 117
a2fbb9ea
ET
118static const struct pci_device_id bnx2x_pci_tbl[] = {
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
125 { 0 }
126};
127
128MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
129
130/****************************************************************************
131* General service functions
132****************************************************************************/
133
134/* used only at init
135 * locking is done by mcp
136 */
137static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
138{
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
140 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
141 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
142 PCICFG_VENDOR_ID_OFFSET);
143}
144
a2fbb9ea
ET
145static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
146{
147 u32 val;
148
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
150 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
151 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
152 PCICFG_VENDOR_ID_OFFSET);
153
154 return val;
155}
a2fbb9ea
ET
156
157static const u32 dmae_reg_go_c[] = {
158 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
159 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
160 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
161 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
162};
163
164/* copy command into DMAE command memory and set DMAE command go */
165static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
166 int idx)
167{
168 u32 cmd_offset;
169 int i;
170
171 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
172 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
173 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
174
ad8d3948
EG
175 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
176 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
177 }
178 REG_WR(bp, dmae_reg_go_c[idx], 1);
179}
180
ad8d3948
EG
181void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
182 u32 len32)
a2fbb9ea 183{
ad8d3948 184 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 185 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
186 int cnt = 200;
187
188 if (!bp->dmae_ready) {
189 u32 *data = bnx2x_sp(bp, wb_data[0]);
190
191 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
192 " using indirect\n", dst_addr, len32);
193 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
194 return;
195 }
196
197 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
198
199 memset(dmae, 0, sizeof(struct dmae_command));
200
201 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
202 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
203 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
204#ifdef __BIG_ENDIAN
205 DMAE_CMD_ENDIANITY_B_DW_SWAP |
206#else
207 DMAE_CMD_ENDIANITY_DW_SWAP |
208#endif
34f80b04
EG
209 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
210 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
211 dmae->src_addr_lo = U64_LO(dma_addr);
212 dmae->src_addr_hi = U64_HI(dma_addr);
213 dmae->dst_addr_lo = dst_addr >> 2;
214 dmae->dst_addr_hi = 0;
215 dmae->len = len32;
216 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
217 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 218 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 219
ad8d3948 220 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
221 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
222 "dst_addr [%x:%08x (%08x)]\n"
223 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
224 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
225 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
226 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 227 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
228 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
229 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
230
231 *wb_comp = 0;
232
34f80b04 233 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
234
235 udelay(5);
ad8d3948
EG
236
237 while (*wb_comp != DMAE_COMP_VAL) {
238 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
239
240 /* adjust delay for emulation/FPGA */
241 if (CHIP_REV_IS_SLOW(bp))
242 msleep(100);
243 else
244 udelay(5);
245
246 if (!cnt) {
a2fbb9ea
ET
247 BNX2X_ERR("dmae timeout!\n");
248 break;
249 }
ad8d3948 250 cnt--;
a2fbb9ea 251 }
ad8d3948
EG
252
253 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
254}
255
c18487ee 256void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 257{
ad8d3948 258 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 259 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
260 int cnt = 200;
261
262 if (!bp->dmae_ready) {
263 u32 *data = bnx2x_sp(bp, wb_data[0]);
264 int i;
265
266 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
267 " using indirect\n", src_addr, len32);
268 for (i = 0; i < len32; i++)
269 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
270 return;
271 }
272
273 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
274
275 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
276 memset(dmae, 0, sizeof(struct dmae_command));
277
278 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
279 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
280 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
281#ifdef __BIG_ENDIAN
282 DMAE_CMD_ENDIANITY_B_DW_SWAP |
283#else
284 DMAE_CMD_ENDIANITY_DW_SWAP |
285#endif
34f80b04
EG
286 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
287 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
288 dmae->src_addr_lo = src_addr >> 2;
289 dmae->src_addr_hi = 0;
290 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
291 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
292 dmae->len = len32;
293 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
294 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 295 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 296
ad8d3948 297 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
298 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
299 "dst_addr [%x:%08x (%08x)]\n"
300 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
301 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
302 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
303 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
304
305 *wb_comp = 0;
306
34f80b04 307 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
308
309 udelay(5);
ad8d3948
EG
310
311 while (*wb_comp != DMAE_COMP_VAL) {
312
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
315 msleep(100);
316 else
317 udelay(5);
318
319 if (!cnt) {
a2fbb9ea
ET
320 BNX2X_ERR("dmae timeout!\n");
321 break;
322 }
ad8d3948 323 cnt--;
a2fbb9ea 324 }
ad8d3948 325 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
326 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
327 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
328
329 mutex_unlock(&bp->dmae_mutex);
330}
331
332/* used only for slowpath so not inlined */
333static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
334{
335 u32 wb_write[2];
336
337 wb_write[0] = val_hi;
338 wb_write[1] = val_lo;
339 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 340}
a2fbb9ea 341
ad8d3948
EG
342#ifdef USE_WB_RD
343static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
344{
345 u32 wb_data[2];
346
347 REG_RD_DMAE(bp, reg, wb_data, 2);
348
349 return HILO_U64(wb_data[0], wb_data[1]);
350}
351#endif
352
a2fbb9ea
ET
353static int bnx2x_mc_assert(struct bnx2x *bp)
354{
a2fbb9ea 355 char last_idx;
34f80b04
EG
356 int i, rc = 0;
357 u32 row0, row1, row2, row3;
358
359 /* XSTORM */
360 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
361 XSTORM_ASSERT_LIST_INDEX_OFFSET);
362 if (last_idx)
363 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
364
365 /* print the asserts */
366 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
367
368 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i));
370 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
372 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
373 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
374 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
375 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
376
377 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
378 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
379 " 0x%08x 0x%08x 0x%08x\n",
380 i, row3, row2, row1, row0);
381 rc++;
382 } else {
383 break;
384 }
385 }
386
387 /* TSTORM */
388 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
389 TSTORM_ASSERT_LIST_INDEX_OFFSET);
390 if (last_idx)
391 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
392
393 /* print the asserts */
394 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
395
396 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i));
398 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
400 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
401 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
402 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
403 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
404
405 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
406 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
407 " 0x%08x 0x%08x 0x%08x\n",
408 i, row3, row2, row1, row0);
409 rc++;
410 } else {
411 break;
412 }
413 }
414
415 /* CSTORM */
416 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
417 CSTORM_ASSERT_LIST_INDEX_OFFSET);
418 if (last_idx)
419 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
420
421 /* print the asserts */
422 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
423
424 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i));
426 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
428 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
429 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
430 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
431 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
432
433 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
434 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
435 " 0x%08x 0x%08x 0x%08x\n",
436 i, row3, row2, row1, row0);
437 rc++;
438 } else {
439 break;
440 }
441 }
442
443 /* USTORM */
444 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
445 USTORM_ASSERT_LIST_INDEX_OFFSET);
446 if (last_idx)
447 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
448
449 /* print the asserts */
450 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
451
452 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i));
454 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 4);
456 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
457 USTORM_ASSERT_LIST_OFFSET(i) + 8);
458 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
459 USTORM_ASSERT_LIST_OFFSET(i) + 12);
460
461 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
462 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
463 " 0x%08x 0x%08x 0x%08x\n",
464 i, row3, row2, row1, row0);
465 rc++;
466 } else {
467 break;
a2fbb9ea
ET
468 }
469 }
34f80b04 470
a2fbb9ea
ET
471 return rc;
472}
c14423fe 473
a2fbb9ea
ET
474static void bnx2x_fw_dump(struct bnx2x *bp)
475{
476 u32 mark, offset;
477 u32 data[9];
478 int word;
479
480 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
481 mark = ((mark + 0x3) & ~0x3);
482 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
483
484 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
485 for (word = 0; word < 8; word++)
486 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
487 offset + 4*word));
488 data[8] = 0x0;
49d66772 489 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
490 }
491 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
492 for (word = 0; word < 8; word++)
493 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
494 offset + 4*word));
495 data[8] = 0x0;
49d66772 496 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
497 }
498 printk("\n" KERN_ERR PFX "end of fw dump\n");
499}
500
501static void bnx2x_panic_dump(struct bnx2x *bp)
502{
503 int i;
504 u16 j, start, end;
505
506 BNX2X_ERR("begin crash dump -----------------\n");
507
508 for_each_queue(bp, i) {
509 struct bnx2x_fastpath *fp = &bp->fp[i];
510 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
511
512 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 513 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 514 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04
EG
515 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
516 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
7a9b2557
VZ
517 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
518 " rx_sge_prod(%x) last_max_sge(%x)\n",
34f80b04 519 fp->rx_comp_prod, fp->rx_comp_cons,
7a9b2557
VZ
520 le16_to_cpu(*fp->rx_cons_sb),
521 le16_to_cpu(*fp->rx_bd_cons_sb),
522 fp->rx_sge_prod, fp->last_max_sge);
34f80b04 523 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
7a9b2557 524 " bd data(%x,%x) rx_alloc_failed(%lx)\n",
34f80b04 525 fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
7a9b2557 526 hw_prods->bds_prod, fp->rx_alloc_failed);
a2fbb9ea
ET
527
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
532
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
535 }
536
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
541
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
544 }
545
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
551
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
554 }
555
7a9b2557
VZ
556 start = 0;
557 end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
561
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
564 }
565
a2fbb9ea
ET
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
570
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
573 }
574 }
575
49d66772
ET
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 578 " spq_prod_idx(%u)\n",
49d66772 579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
581
34f80b04 582 bnx2x_fw_dump(bp);
a2fbb9ea
ET
583 bnx2x_mc_assert(bp);
584 BNX2X_ERR("end crash dump -----------------\n");
585
bb2a0f7a
YG
586 bp->stats_state = STATS_STATE_DISABLED;
587 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
a2fbb9ea
ET
588}
589
615f8fd9 590static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 591{
34f80b04 592 int port = BP_PORT(bp);
a2fbb9ea
ET
593 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
594 u32 val = REG_RD(bp, addr);
595 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
596
597 if (msix) {
598 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
599 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
600 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
601 } else {
602 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 603 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
604 HC_CONFIG_0_REG_INT_LINE_EN_0 |
605 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 606
615f8fd9
ET
607 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
608 val, port, addr, msix);
609
610 REG_WR(bp, addr, val);
611
a2fbb9ea
ET
612 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
613 }
614
615f8fd9 615 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
a2fbb9ea
ET
616 val, port, addr, msix);
617
618 REG_WR(bp, addr, val);
34f80b04
EG
619
620 if (CHIP_IS_E1H(bp)) {
621 /* init leading/trailing edge */
622 if (IS_E1HMF(bp)) {
623 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
624 if (bp->port.pmf)
625 /* enable nig attention */
626 val |= 0x0100;
627 } else
628 val = 0xffff;
629
630 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
631 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
632 }
a2fbb9ea
ET
633}
634
615f8fd9 635static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 636{
34f80b04 637 int port = BP_PORT(bp);
a2fbb9ea
ET
638 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
639 u32 val = REG_RD(bp, addr);
640
641 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
642 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
643 HC_CONFIG_0_REG_INT_LINE_EN_0 |
644 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
645
646 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
647 val, port, addr);
648
649 REG_WR(bp, addr, val);
650 if (REG_RD(bp, addr) != val)
651 BNX2X_ERR("BUG! proper val not read from IGU!\n");
652}
653
615f8fd9 654static void bnx2x_int_disable_sync(struct bnx2x *bp)
a2fbb9ea 655{
a2fbb9ea
ET
656 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657 int i;
658
34f80b04 659 /* disable interrupt handling */
a2fbb9ea 660 atomic_inc(&bp->intr_sem);
c14423fe 661 /* prevent the HW from sending interrupts */
615f8fd9 662 bnx2x_int_disable(bp);
a2fbb9ea
ET
663
664 /* make sure all ISRs are done */
665 if (msix) {
666 for_each_queue(bp, i)
667 synchronize_irq(bp->msix_table[i].vector);
668
669 /* one more for the Slow Path IRQ */
670 synchronize_irq(bp->msix_table[i].vector);
671 } else
672 synchronize_irq(bp->pdev->irq);
673
674 /* make sure sp_task is not running */
675 cancel_work_sync(&bp->sp_task);
a2fbb9ea
ET
676}
677
34f80b04 678/* fast path */
a2fbb9ea
ET
679
680/*
34f80b04 681 * General service functions
a2fbb9ea
ET
682 */
683
34f80b04 684static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
685 u8 storm, u16 index, u8 op, u8 update)
686{
34f80b04 687 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
688 struct igu_ack_register igu_ack;
689
690 igu_ack.status_block_index = index;
691 igu_ack.sb_id_and_flags =
34f80b04 692 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
693 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696
34f80b04
EG
697 DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
698 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
a2fbb9ea
ET
699 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
700}
701
702static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
703{
704 struct host_status_block *fpsb = fp->status_blk;
705 u16 rc = 0;
706
707 barrier(); /* status block is written to by the chip */
708 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
709 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
710 rc |= 1;
711 }
712 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
713 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
714 rc |= 2;
715 }
716 return rc;
717}
718
719static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
720{
721 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
722
723 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
724 rx_cons_sb++;
725
34f80b04
EG
726 if ((fp->rx_comp_cons != rx_cons_sb) ||
727 (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
728 (fp->tx_pkt_prod != fp->tx_pkt_cons))
a2fbb9ea
ET
729 return 1;
730
731 return 0;
732}
733
734static u16 bnx2x_ack_int(struct bnx2x *bp)
735{
34f80b04 736 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
737 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
738
34f80b04
EG
739 DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
740 result, BAR_IGU_INTMEM + igu_addr);
a2fbb9ea
ET
741
742#ifdef IGU_DEBUG
743#warning IGU_DEBUG active
744 if (result == 0) {
745 BNX2X_ERR("read %x from IGU\n", result);
746 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
747 }
748#endif
749 return result;
750}
751
752
753/*
754 * fast path service functions
755 */
756
757/* free skb in the packet ring at pos idx
758 * return idx of last bd freed
759 */
760static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
761 u16 idx)
762{
763 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
764 struct eth_tx_bd *tx_bd;
765 struct sk_buff *skb = tx_buf->skb;
34f80b04 766 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
767 int nbd;
768
769 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
770 idx, tx_buf, skb);
771
772 /* unmap first bd */
773 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
774 tx_bd = &fp->tx_desc_ring[bd_idx];
775 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
776 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
777
778 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 779 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
780#ifdef BNX2X_STOP_ON_ERROR
781 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 782 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
783 bnx2x_panic();
784 }
785#endif
786
787 /* Skip a parse bd and the TSO split header bd
788 since they have no mapping */
789 if (nbd)
790 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
791
792 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
793 ETH_TX_BD_FLAGS_TCP_CSUM |
794 ETH_TX_BD_FLAGS_SW_LSO)) {
795 if (--nbd)
796 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
797 tx_bd = &fp->tx_desc_ring[bd_idx];
798 /* is this a TSO split header bd? */
799 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
800 if (--nbd)
801 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
802 }
803 }
804
805 /* now free frags */
806 while (nbd > 0) {
807
808 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
809 tx_bd = &fp->tx_desc_ring[bd_idx];
810 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
811 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
812 if (--nbd)
813 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
814 }
815
816 /* release skb */
817 BUG_TRAP(skb);
818 dev_kfree_skb(skb);
819 tx_buf->first_bd = 0;
820 tx_buf->skb = NULL;
821
34f80b04 822 return new_cons;
a2fbb9ea
ET
823}
824
34f80b04 825static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 826{
34f80b04
EG
827 s16 used;
828 u16 prod;
829 u16 cons;
a2fbb9ea 830
34f80b04 831 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
832 prod = fp->tx_bd_prod;
833 cons = fp->tx_bd_cons;
834
34f80b04
EG
835 /* NUM_TX_RINGS = number of "next-page" entries
836 It will be used as a threshold */
837 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 838
34f80b04
EG
839#ifdef BNX2X_STOP_ON_ERROR
840 BUG_TRAP(used >= 0);
a2fbb9ea
ET
841 BUG_TRAP(used <= fp->bp->tx_ring_size);
842 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
34f80b04 843#endif
a2fbb9ea 844
34f80b04 845 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
846}
847
848static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
849{
850 struct bnx2x *bp = fp->bp;
851 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
852 int done = 0;
853
854#ifdef BNX2X_STOP_ON_ERROR
855 if (unlikely(bp->panic))
856 return;
857#endif
858
859 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
860 sw_cons = fp->tx_pkt_cons;
861
862 while (sw_cons != hw_cons) {
863 u16 pkt_cons;
864
865 pkt_cons = TX_BD(sw_cons);
866
867 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
868
34f80b04 869 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
870 hw_cons, sw_cons, pkt_cons);
871
34f80b04 872/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
873 rmb();
874 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
875 }
876*/
877 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
878 sw_cons++;
879 done++;
880
881 if (done == work)
882 break;
883 }
884
885 fp->tx_pkt_cons = sw_cons;
886 fp->tx_bd_cons = bd_cons;
887
888 /* Need to make the tx_cons update visible to start_xmit()
889 * before checking for netif_queue_stopped(). Without the
890 * memory barrier, there is a small possibility that start_xmit()
891 * will miss it and cause the queue to be stopped forever.
892 */
893 smp_mb();
894
895 /* TBD need a thresh? */
896 if (unlikely(netif_queue_stopped(bp->dev))) {
897
898 netif_tx_lock(bp->dev);
899
900 if (netif_queue_stopped(bp->dev) &&
901 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
902 netif_wake_queue(bp->dev);
903
904 netif_tx_unlock(bp->dev);
a2fbb9ea
ET
905 }
906}
907
908static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
909 union eth_rx_cqe *rr_cqe)
910{
911 struct bnx2x *bp = fp->bp;
912 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
913 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
914
34f80b04 915 DP(BNX2X_MSG_SP,
a2fbb9ea 916 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
917 FP_IDX(fp), cid, command, bp->state,
918 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
919
920 bp->spq_left++;
921
34f80b04 922 if (FP_IDX(fp)) {
a2fbb9ea
ET
923 switch (command | fp->state) {
924 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
925 BNX2X_FP_STATE_OPENING):
926 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
927 cid);
928 fp->state = BNX2X_FP_STATE_OPEN;
929 break;
930
931 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
932 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
933 cid);
934 fp->state = BNX2X_FP_STATE_HALTED;
935 break;
936
937 default:
34f80b04
EG
938 BNX2X_ERR("unexpected MC reply (%d) "
939 "fp->state is %x\n", command, fp->state);
940 break;
a2fbb9ea 941 }
34f80b04 942 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
943 return;
944 }
c14423fe 945
a2fbb9ea
ET
946 switch (command | bp->state) {
947 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
948 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
949 bp->state = BNX2X_STATE_OPEN;
950 break;
951
952 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
953 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
954 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
955 fp->state = BNX2X_FP_STATE_HALTED;
956 break;
957
a2fbb9ea 958 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 959 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 960 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
961 break;
962
963 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 964 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 965 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 966 bp->set_mac_pending = 0;
a2fbb9ea
ET
967 break;
968
49d66772 969 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 970 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
971 break;
972
a2fbb9ea 973 default:
34f80b04 974 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 975 command, bp->state);
34f80b04 976 break;
a2fbb9ea 977 }
34f80b04 978 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
979}
980
7a9b2557
VZ
981static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
982 struct bnx2x_fastpath *fp, u16 index)
983{
984 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
985 struct page *page = sw_buf->page;
986 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
987
988 /* Skip "next page" elements */
989 if (!page)
990 return;
991
992 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
993 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
994 __free_pages(page, PAGES_PER_SGE_SHIFT);
995
996 sw_buf->page = NULL;
997 sge->addr_hi = 0;
998 sge->addr_lo = 0;
999}
1000
1001static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1002 struct bnx2x_fastpath *fp, int last)
1003{
1004 int i;
1005
1006 for (i = 0; i < last; i++)
1007 bnx2x_free_rx_sge(bp, fp, i);
1008}
1009
1010static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1011 struct bnx2x_fastpath *fp, u16 index)
1012{
1013 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1014 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1015 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1016 dma_addr_t mapping;
1017
1018 if (unlikely(page == NULL))
1019 return -ENOMEM;
1020
1021 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1022 PCI_DMA_FROMDEVICE);
1023 if (unlikely(dma_mapping_error(mapping))) {
1024 __free_pages(page, PAGES_PER_SGE_SHIFT);
1025 return -ENOMEM;
1026 }
1027
1028 sw_buf->page = page;
1029 pci_unmap_addr_set(sw_buf, mapping, mapping);
1030
1031 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1032 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1033
1034 return 0;
1035}
1036
a2fbb9ea
ET
1037static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1038 struct bnx2x_fastpath *fp, u16 index)
1039{
1040 struct sk_buff *skb;
1041 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1042 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1043 dma_addr_t mapping;
1044
1045 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1046 if (unlikely(skb == NULL))
1047 return -ENOMEM;
1048
1049 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1050 PCI_DMA_FROMDEVICE);
1051 if (unlikely(dma_mapping_error(mapping))) {
a2fbb9ea
ET
1052 dev_kfree_skb(skb);
1053 return -ENOMEM;
1054 }
1055
1056 rx_buf->skb = skb;
1057 pci_unmap_addr_set(rx_buf, mapping, mapping);
1058
1059 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1060 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1061
1062 return 0;
1063}
1064
1065/* note that we are not allocating a new skb,
1066 * we are just moving one from cons to prod
1067 * we are not creating a new mapping,
1068 * so there is no need to check for dma_mapping_error().
1069 */
1070static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1071 struct sk_buff *skb, u16 cons, u16 prod)
1072{
1073 struct bnx2x *bp = fp->bp;
1074 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1075 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1076 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1077 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1078
1079 pci_dma_sync_single_for_device(bp->pdev,
1080 pci_unmap_addr(cons_rx_buf, mapping),
1081 bp->rx_offset + RX_COPY_THRESH,
1082 PCI_DMA_FROMDEVICE);
1083
1084 prod_rx_buf->skb = cons_rx_buf->skb;
1085 pci_unmap_addr_set(prod_rx_buf, mapping,
1086 pci_unmap_addr(cons_rx_buf, mapping));
1087 *prod_bd = *cons_bd;
1088}
1089
7a9b2557
VZ
1090static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1091 u16 idx)
1092{
1093 u16 last_max = fp->last_max_sge;
1094
1095 if (SUB_S16(idx, last_max) > 0)
1096 fp->last_max_sge = idx;
1097}
1098
1099static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1100{
1101 int i, j;
1102
1103 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1104 int idx = RX_SGE_CNT * i - 1;
1105
1106 for (j = 0; j < 2; j++) {
1107 SGE_MASK_CLEAR_BIT(fp, idx);
1108 idx--;
1109 }
1110 }
1111}
1112
1113static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1114 struct eth_fast_path_rx_cqe *fp_cqe)
1115{
1116 struct bnx2x *bp = fp->bp;
1117 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1118 le16_to_cpu(fp_cqe->len_on_bd)) >>
1119 BCM_PAGE_SHIFT;
1120 u16 last_max, last_elem, first_elem;
1121 u16 delta = 0;
1122 u16 i;
1123
1124 if (!sge_len)
1125 return;
1126
1127 /* First mark all used pages */
1128 for (i = 0; i < sge_len; i++)
1129 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1130
1131 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1132 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1133
1134 /* Here we assume that the last SGE index is the biggest */
1135 prefetch((void *)(fp->sge_mask));
1136 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1137
1138 last_max = RX_SGE(fp->last_max_sge);
1139 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1140 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1141
1142 /* If ring is not full */
1143 if (last_elem + 1 != first_elem)
1144 last_elem++;
1145
1146 /* Now update the prod */
1147 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1148 if (likely(fp->sge_mask[i]))
1149 break;
1150
1151 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1152 delta += RX_SGE_MASK_ELEM_SZ;
1153 }
1154
1155 if (delta > 0) {
1156 fp->rx_sge_prod += delta;
1157 /* clear page-end entries */
1158 bnx2x_clear_sge_mask_next_elems(fp);
1159 }
1160
1161 DP(NETIF_MSG_RX_STATUS,
1162 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1163 fp->last_max_sge, fp->rx_sge_prod);
1164}
1165
1166static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1167{
1168 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1169 memset(fp->sge_mask, 0xff,
1170 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1171
1172 /* Clear the two last indeces in the page to 1:
1173 these are the indeces that correspond to the "next" element,
1174 hence will never be indicated and should be removed from
1175 the calculations. */
1176 bnx2x_clear_sge_mask_next_elems(fp);
1177}
1178
1179static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1180 struct sk_buff *skb, u16 cons, u16 prod)
1181{
1182 struct bnx2x *bp = fp->bp;
1183 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1184 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1185 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1186 dma_addr_t mapping;
1187
1188 /* move empty skb from pool to prod and map it */
1189 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1190 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1191 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1192 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1193
1194 /* move partial skb from cons to pool (don't unmap yet) */
1195 fp->tpa_pool[queue] = *cons_rx_buf;
1196
1197 /* mark bin state as start - print error if current state != stop */
1198 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1199 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1200
1201 fp->tpa_state[queue] = BNX2X_TPA_START;
1202
1203 /* point prod_bd to new skb */
1204 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1205 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1206
1207#ifdef BNX2X_STOP_ON_ERROR
1208 fp->tpa_queue_used |= (1 << queue);
1209#ifdef __powerpc64__
1210 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1211#else
1212 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1213#endif
1214 fp->tpa_queue_used);
1215#endif
1216}
1217
1218static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1219 struct sk_buff *skb,
1220 struct eth_fast_path_rx_cqe *fp_cqe,
1221 u16 cqe_idx)
1222{
1223 struct sw_rx_page *rx_pg, old_rx_pg;
1224 struct page *sge;
1225 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1226 u32 i, frag_len, frag_size, pages;
1227 int err;
1228 int j;
1229
1230 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1231 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1232
1233 /* This is needed in order to enable forwarding support */
1234 if (frag_size)
1235 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1236 max(frag_size, (u32)len_on_bd));
1237
1238#ifdef BNX2X_STOP_ON_ERROR
1239 if (pages > 8*PAGES_PER_SGE) {
1240 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1241 pages, cqe_idx);
1242 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1243 fp_cqe->pkt_len, len_on_bd);
1244 bnx2x_panic();
1245 return -EINVAL;
1246 }
1247#endif
1248
1249 /* Run through the SGL and compose the fragmented skb */
1250 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1251 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1252
1253 /* FW gives the indices of the SGE as if the ring is an array
1254 (meaning that "next" element will consume 2 indices) */
1255 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1256 rx_pg = &fp->rx_page_ring[sge_idx];
1257 sge = rx_pg->page;
1258 old_rx_pg = *rx_pg;
1259
1260 /* If we fail to allocate a substitute page, we simply stop
1261 where we are and drop the whole packet */
1262 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1263 if (unlikely(err)) {
1264 fp->rx_alloc_failed++;
1265 return err;
1266 }
1267
1268 /* Unmap the page as we r going to pass it to the stack */
1269 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1270 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1271
1272 /* Add one frag and update the appropriate fields in the skb */
1273 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1274
1275 skb->data_len += frag_len;
1276 skb->truesize += frag_len;
1277 skb->len += frag_len;
1278
1279 frag_size -= frag_len;
1280 }
1281
1282 return 0;
1283}
1284
1285static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1286 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1287 u16 cqe_idx)
1288{
1289 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1290 struct sk_buff *skb = rx_buf->skb;
1291 /* alloc new skb */
1292 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1293
1294 /* Unmap skb in the pool anyway, as we are going to change
1295 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1296 fails. */
1297 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1298 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1299
1300 /* if alloc failed drop the packet and keep the buffer in the bin */
1301 if (likely(new_skb)) {
1302
1303 prefetch(skb);
1304 prefetch(((char *)(skb)) + 128);
1305
1306 /* else fix ip xsum and give it to the stack */
1307 /* (no need to map the new skb) */
1308#ifdef BNX2X_STOP_ON_ERROR
1309 if (pad + len > bp->rx_buf_size) {
1310 BNX2X_ERR("skb_put is about to fail... "
1311 "pad %d len %d rx_buf_size %d\n",
1312 pad, len, bp->rx_buf_size);
1313 bnx2x_panic();
1314 return;
1315 }
1316#endif
1317
1318 skb_reserve(skb, pad);
1319 skb_put(skb, len);
1320
1321 skb->protocol = eth_type_trans(skb, bp->dev);
1322 skb->ip_summed = CHECKSUM_UNNECESSARY;
1323
1324 {
1325 struct iphdr *iph;
1326
1327 iph = (struct iphdr *)skb->data;
1328 iph->check = 0;
1329 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1330 }
1331
1332 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1333 &cqe->fast_path_cqe, cqe_idx)) {
1334#ifdef BCM_VLAN
1335 if ((bp->vlgrp != NULL) &&
1336 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1337 PARSING_FLAGS_VLAN))
1338 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1339 le16_to_cpu(cqe->fast_path_cqe.
1340 vlan_tag));
1341 else
1342#endif
1343 netif_receive_skb(skb);
1344 } else {
1345 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1346 " - dropping packet!\n");
1347 dev_kfree_skb(skb);
1348 }
1349
1350 bp->dev->last_rx = jiffies;
1351
1352 /* put new skb in bin */
1353 fp->tpa_pool[queue].skb = new_skb;
1354
1355 } else {
1356 DP(NETIF_MSG_RX_STATUS,
1357 "Failed to allocate new skb - dropping packet!\n");
1358 fp->rx_alloc_failed++;
1359 }
1360
1361 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1362}
1363
1364static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1365 struct bnx2x_fastpath *fp,
1366 u16 bd_prod, u16 rx_comp_prod,
1367 u16 rx_sge_prod)
1368{
1369 struct tstorm_eth_rx_producers rx_prods = {0};
1370 int i;
1371
1372 /* Update producers */
1373 rx_prods.bd_prod = bd_prod;
1374 rx_prods.cqe_prod = rx_comp_prod;
1375 rx_prods.sge_prod = rx_sge_prod;
1376
1377 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1378 REG_WR(bp, BAR_TSTRORM_INTMEM +
1379 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1380 ((u32 *)&rx_prods)[i]);
1381
1382 DP(NETIF_MSG_RX_STATUS,
1383 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1384 bd_prod, rx_comp_prod, rx_sge_prod);
1385}
1386
a2fbb9ea
ET
1387static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1388{
1389 struct bnx2x *bp = fp->bp;
34f80b04 1390 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1391 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1392 int rx_pkt = 0;
7a9b2557 1393 u16 queue;
a2fbb9ea
ET
1394
1395#ifdef BNX2X_STOP_ON_ERROR
1396 if (unlikely(bp->panic))
1397 return 0;
1398#endif
1399
34f80b04
EG
1400 /* CQ "next element" is of the size of the regular element,
1401 that's why it's ok here */
a2fbb9ea
ET
1402 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1403 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1404 hw_comp_cons++;
1405
1406 bd_cons = fp->rx_bd_cons;
1407 bd_prod = fp->rx_bd_prod;
34f80b04 1408 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1409 sw_comp_cons = fp->rx_comp_cons;
1410 sw_comp_prod = fp->rx_comp_prod;
1411
1412 /* Memory barrier necessary as speculative reads of the rx
1413 * buffer can be ahead of the index in the status block
1414 */
1415 rmb();
1416
1417 DP(NETIF_MSG_RX_STATUS,
1418 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1419 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1420
1421 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1422 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1423 struct sk_buff *skb;
1424 union eth_rx_cqe *cqe;
34f80b04
EG
1425 u8 cqe_fp_flags;
1426 u16 len, pad;
a2fbb9ea
ET
1427
1428 comp_ring_cons = RCQ_BD(sw_comp_cons);
1429 bd_prod = RX_BD(bd_prod);
1430 bd_cons = RX_BD(bd_cons);
1431
1432 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1433 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1434
a2fbb9ea 1435 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1436 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1437 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
a2fbb9ea 1438 cqe->fast_path_cqe.rss_hash_result,
34f80b04
EG
1439 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1440 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1441
1442 /* is this a slowpath msg? */
34f80b04 1443 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1444 bnx2x_sp_event(fp, cqe);
1445 goto next_cqe;
1446
1447 /* this is an rx packet */
1448 } else {
1449 rx_buf = &fp->rx_buf_ring[bd_cons];
1450 skb = rx_buf->skb;
a2fbb9ea
ET
1451 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1452 pad = cqe->fast_path_cqe.placement_offset;
1453
7a9b2557
VZ
1454 /* If CQE is marked both TPA_START and TPA_END
1455 it is a non-TPA CQE */
1456 if ((!fp->disable_tpa) &&
1457 (TPA_TYPE(cqe_fp_flags) !=
1458 (TPA_TYPE_START | TPA_TYPE_END))) {
1459 queue = cqe->fast_path_cqe.queue_index;
1460
1461 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1462 DP(NETIF_MSG_RX_STATUS,
1463 "calling tpa_start on queue %d\n",
1464 queue);
1465
1466 bnx2x_tpa_start(fp, queue, skb,
1467 bd_cons, bd_prod);
1468 goto next_rx;
1469 }
1470
1471 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1472 DP(NETIF_MSG_RX_STATUS,
1473 "calling tpa_stop on queue %d\n",
1474 queue);
1475
1476 if (!BNX2X_RX_SUM_FIX(cqe))
1477 BNX2X_ERR("STOP on none TCP "
1478 "data\n");
1479
1480 /* This is a size of the linear data
1481 on this skb */
1482 len = le16_to_cpu(cqe->fast_path_cqe.
1483 len_on_bd);
1484 bnx2x_tpa_stop(bp, fp, queue, pad,
1485 len, cqe, comp_ring_cons);
1486#ifdef BNX2X_STOP_ON_ERROR
1487 if (bp->panic)
1488 return -EINVAL;
1489#endif
1490
1491 bnx2x_update_sge_prod(fp,
1492 &cqe->fast_path_cqe);
1493 goto next_cqe;
1494 }
1495 }
1496
a2fbb9ea
ET
1497 pci_dma_sync_single_for_device(bp->pdev,
1498 pci_unmap_addr(rx_buf, mapping),
1499 pad + RX_COPY_THRESH,
1500 PCI_DMA_FROMDEVICE);
1501 prefetch(skb);
1502 prefetch(((char *)(skb)) + 128);
1503
1504 /* is this an error packet? */
34f80b04 1505 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea
ET
1506 /* do we sometimes forward error packets anyway? */
1507 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1508 "ERROR flags %x rx packet %u\n",
1509 cqe_fp_flags, sw_comp_cons);
a2fbb9ea
ET
1510 /* TBD make sure MC counts this as a drop */
1511 goto reuse_rx;
1512 }
1513
1514 /* Since we don't have a jumbo ring
1515 * copy small packets if mtu > 1500
1516 */
1517 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1518 (len <= RX_COPY_THRESH)) {
1519 struct sk_buff *new_skb;
1520
1521 new_skb = netdev_alloc_skb(bp->dev,
1522 len + pad);
1523 if (new_skb == NULL) {
1524 DP(NETIF_MSG_RX_ERR,
34f80b04 1525 "ERROR packet dropped "
a2fbb9ea 1526 "because of alloc failure\n");
7a9b2557 1527 fp->rx_alloc_failed++;
a2fbb9ea
ET
1528 goto reuse_rx;
1529 }
1530
1531 /* aligned copy */
1532 skb_copy_from_linear_data_offset(skb, pad,
1533 new_skb->data + pad, len);
1534 skb_reserve(new_skb, pad);
1535 skb_put(new_skb, len);
1536
1537 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1538
1539 skb = new_skb;
1540
1541 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1542 pci_unmap_single(bp->pdev,
1543 pci_unmap_addr(rx_buf, mapping),
1544 bp->rx_buf_use_size,
1545 PCI_DMA_FROMDEVICE);
1546 skb_reserve(skb, pad);
1547 skb_put(skb, len);
1548
1549 } else {
1550 DP(NETIF_MSG_RX_ERR,
34f80b04 1551 "ERROR packet dropped because "
a2fbb9ea 1552 "of alloc failure\n");
7a9b2557 1553 fp->rx_alloc_failed++;
a2fbb9ea
ET
1554reuse_rx:
1555 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1556 goto next_rx;
1557 }
1558
1559 skb->protocol = eth_type_trans(skb, bp->dev);
1560
1561 skb->ip_summed = CHECKSUM_NONE;
1562 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1563 skb->ip_summed = CHECKSUM_UNNECESSARY;
1564
1565 /* TBD do we pass bad csum packets in promisc */
1566 }
1567
1568#ifdef BCM_VLAN
34f80b04
EG
1569 if ((bp->vlgrp != NULL) &&
1570 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1571 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1572 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1573 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1574 else
1575#endif
34f80b04 1576 netif_receive_skb(skb);
a2fbb9ea
ET
1577
1578 bp->dev->last_rx = jiffies;
1579
1580next_rx:
1581 rx_buf->skb = NULL;
1582
1583 bd_cons = NEXT_RX_IDX(bd_cons);
1584 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1585 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1586 rx_pkt++;
a2fbb9ea
ET
1587next_cqe:
1588 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1589 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1590
34f80b04 1591 if (rx_pkt == budget)
a2fbb9ea
ET
1592 break;
1593 } /* while */
1594
1595 fp->rx_bd_cons = bd_cons;
34f80b04 1596 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1597 fp->rx_comp_cons = sw_comp_cons;
1598 fp->rx_comp_prod = sw_comp_prod;
1599
7a9b2557
VZ
1600 /* Update producers */
1601 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1602 fp->rx_sge_prod);
a2fbb9ea
ET
1603 mmiowb(); /* keep prod updates ordered */
1604
1605 fp->rx_pkt += rx_pkt;
1606 fp->rx_calls++;
1607
1608 return rx_pkt;
1609}
1610
1611static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1612{
1613 struct bnx2x_fastpath *fp = fp_cookie;
1614 struct bnx2x *bp = fp->bp;
1615 struct net_device *dev = bp->dev;
34f80b04 1616 int index = FP_IDX(fp);
a2fbb9ea 1617
34f80b04
EG
1618 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1619 index, FP_SB_ID(fp));
1620 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1621
1622#ifdef BNX2X_STOP_ON_ERROR
1623 if (unlikely(bp->panic))
1624 return IRQ_HANDLED;
1625#endif
1626
1627 prefetch(fp->rx_cons_sb);
1628 prefetch(fp->tx_cons_sb);
1629 prefetch(&fp->status_blk->c_status_block.status_block_index);
1630 prefetch(&fp->status_blk->u_status_block.status_block_index);
1631
1632 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
34f80b04 1633
a2fbb9ea
ET
1634 return IRQ_HANDLED;
1635}
1636
1637static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1638{
1639 struct net_device *dev = dev_instance;
1640 struct bnx2x *bp = netdev_priv(dev);
1641 u16 status = bnx2x_ack_int(bp);
34f80b04 1642 u16 mask;
a2fbb9ea 1643
34f80b04 1644 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1645 if (unlikely(status == 0)) {
1646 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1647 return IRQ_NONE;
1648 }
34f80b04 1649 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea
ET
1650
1651#ifdef BNX2X_STOP_ON_ERROR
1652 if (unlikely(bp->panic))
1653 return IRQ_HANDLED;
1654#endif
1655
34f80b04 1656 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1657 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1658 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1659 return IRQ_HANDLED;
1660 }
1661
34f80b04
EG
1662 mask = 0x2 << bp->fp[0].sb_id;
1663 if (status & mask) {
a2fbb9ea
ET
1664 struct bnx2x_fastpath *fp = &bp->fp[0];
1665
1666 prefetch(fp->rx_cons_sb);
1667 prefetch(fp->tx_cons_sb);
1668 prefetch(&fp->status_blk->c_status_block.status_block_index);
1669 prefetch(&fp->status_blk->u_status_block.status_block_index);
1670
1671 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1672
34f80b04 1673 status &= ~mask;
a2fbb9ea
ET
1674 }
1675
a2fbb9ea 1676
34f80b04 1677 if (unlikely(status & 0x1)) {
a2fbb9ea
ET
1678 schedule_work(&bp->sp_task);
1679
1680 status &= ~0x1;
1681 if (!status)
1682 return IRQ_HANDLED;
1683 }
1684
34f80b04
EG
1685 if (status)
1686 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1687 status);
a2fbb9ea 1688
c18487ee 1689 return IRQ_HANDLED;
a2fbb9ea
ET
1690}
1691
c18487ee 1692/* end of fast path */
a2fbb9ea 1693
bb2a0f7a 1694static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1695
c18487ee
YR
1696/* Link */
1697
1698/*
1699 * General service functions
1700 */
a2fbb9ea 1701
c18487ee
YR
1702static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1703{
1704 u32 lock_status;
1705 u32 resource_bit = (1 << resource);
34f80b04 1706 u8 port = BP_PORT(bp);
c18487ee 1707 int cnt;
a2fbb9ea 1708
c18487ee
YR
1709 /* Validating that the resource is within range */
1710 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1711 DP(NETIF_MSG_HW,
1712 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1713 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1714 return -EINVAL;
1715 }
a2fbb9ea 1716
c18487ee
YR
1717 /* Validating that the resource is not already taken */
1718 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1719 if (lock_status & resource_bit) {
1720 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1721 lock_status, resource_bit);
1722 return -EEXIST;
1723 }
a2fbb9ea 1724
c18487ee
YR
1725 /* Try for 1 second every 5ms */
1726 for (cnt = 0; cnt < 200; cnt++) {
1727 /* Try to acquire the lock */
1728 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
1729 resource_bit);
1730 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1731 if (lock_status & resource_bit)
1732 return 0;
a2fbb9ea 1733
c18487ee 1734 msleep(5);
a2fbb9ea 1735 }
c18487ee
YR
1736 DP(NETIF_MSG_HW, "Timeout\n");
1737 return -EAGAIN;
1738}
a2fbb9ea 1739
c18487ee
YR
1740static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1741{
1742 u32 lock_status;
1743 u32 resource_bit = (1 << resource);
34f80b04 1744 u8 port = BP_PORT(bp);
a2fbb9ea 1745
c18487ee
YR
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 DP(NETIF_MSG_HW,
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 return -EINVAL;
1752 }
1753
1754 /* Validating that the resource is currently taken */
1755 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1756 if (!(lock_status & resource_bit)) {
1757 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1758 lock_status, resource_bit);
1759 return -EFAULT;
a2fbb9ea
ET
1760 }
1761
c18487ee
YR
1762 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
1763 return 0;
1764}
1765
1766/* HW Lock for shared dual port PHYs */
1767static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1768{
1769 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1770
34f80b04 1771 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1772
c18487ee
YR
1773 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1774 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1775 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1776}
a2fbb9ea 1777
c18487ee
YR
1778static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
1779{
1780 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1781
c18487ee
YR
1782 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1783 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1784 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1785
34f80b04 1786 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1787}
a2fbb9ea 1788
c18487ee
YR
1789int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1790{
1791 /* The GPIO should be swapped if swap register is set and active */
1792 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
34f80b04 1793 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
c18487ee
YR
1794 int gpio_shift = gpio_num +
1795 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1796 u32 gpio_mask = (1 << gpio_shift);
1797 u32 gpio_reg;
a2fbb9ea 1798
c18487ee
YR
1799 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1800 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1801 return -EINVAL;
1802 }
a2fbb9ea 1803
c18487ee
YR
1804 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 1805 	/* read GPIO and mask off everything except the float bits */
1806 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1807
c18487ee
YR
1808 switch (mode) {
1809 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1810 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1811 gpio_num, gpio_shift);
1812 /* clear FLOAT and set CLR */
1813 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1814 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1815 break;
a2fbb9ea 1816
c18487ee
YR
1817 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1818 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1819 gpio_num, gpio_shift);
1820 /* clear FLOAT and set SET */
1821 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1822 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1823 break;
a2fbb9ea 1824
c18487ee
YR
 1825 	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1826 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1827 gpio_num, gpio_shift);
1828 /* set FLOAT */
1829 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1830 break;
a2fbb9ea 1831
c18487ee
YR
1832 default:
1833 break;
a2fbb9ea
ET
1834 }
1835
c18487ee
YR
1836 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1837 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1838
c18487ee 1839 return 0;
a2fbb9ea
ET
1840}
1841
c18487ee 1842static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1843{
c18487ee
YR
1844 u32 spio_mask = (1 << spio_num);
1845 u32 spio_reg;
a2fbb9ea 1846
c18487ee
YR
1847 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1848 (spio_num > MISC_REGISTERS_SPIO_7)) {
1849 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1850 return -EINVAL;
a2fbb9ea
ET
1851 }
1852
c18487ee
YR
1853 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 1854 	/* read SPIO and mask off everything except the float bits */
1855 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1856
c18487ee
YR
1857 switch (mode) {
 1858 	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1859 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1860 /* clear FLOAT and set CLR */
1861 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1862 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1863 break;
a2fbb9ea 1864
c18487ee
YR
 1865 	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1866 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1867 /* clear FLOAT and set SET */
1868 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1869 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1870 break;
a2fbb9ea 1871
c18487ee
YR
1872 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1873 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1874 /* set FLOAT */
1875 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1876 break;
a2fbb9ea 1877
c18487ee
YR
1878 default:
1879 break;
a2fbb9ea
ET
1880 }
1881
c18487ee
YR
1882 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1883 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1884
a2fbb9ea
ET
1885 return 0;
1886}
1887
c18487ee 1888static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1889{
c18487ee
YR
1890 switch (bp->link_vars.ieee_fc) {
1891 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1892 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1893 ADVERTISED_Pause);
1894 break;
1895 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1896 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1897 ADVERTISED_Pause);
1898 break;
1899 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1900 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1901 break;
1902 default:
34f80b04 1903 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1904 ADVERTISED_Pause);
1905 break;
1906 }
1907}
f1410647 1908
c18487ee
YR
1909static void bnx2x_link_report(struct bnx2x *bp)
1910{
1911 if (bp->link_vars.link_up) {
1912 if (bp->state == BNX2X_STATE_OPEN)
1913 netif_carrier_on(bp->dev);
1914 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1915
c18487ee 1916 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1917
c18487ee
YR
1918 if (bp->link_vars.duplex == DUPLEX_FULL)
1919 printk("full duplex");
1920 else
1921 printk("half duplex");
f1410647 1922
c18487ee
YR
1923 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1924 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1925 printk(", receive ");
1926 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1927 printk("& transmit ");
1928 } else {
1929 printk(", transmit ");
1930 }
1931 printk("flow control ON");
1932 }
1933 printk("\n");
f1410647 1934
c18487ee
YR
1935 } else { /* link_down */
1936 netif_carrier_off(bp->dev);
1937 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1938 }
c18487ee
YR
1939}
1940
1941static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1942{
1943 u8 rc;
a2fbb9ea 1944
c18487ee
YR
1945 /* Initialize link parameters structure variables */
1946 bp->link_params.mtu = bp->dev->mtu;
a2fbb9ea 1947
c18487ee
YR
1948 bnx2x_phy_hw_lock(bp);
1949 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1950 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1951
c18487ee
YR
1952 if (bp->link_vars.link_up)
1953 bnx2x_link_report(bp);
a2fbb9ea 1954
c18487ee 1955 bnx2x_calc_fc_adv(bp);
34f80b04 1956
c18487ee 1957 return rc;
a2fbb9ea
ET
1958}
1959
c18487ee 1960static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1961{
c18487ee
YR
1962 bnx2x_phy_hw_lock(bp);
1963 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1964 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1965
c18487ee
YR
1966 bnx2x_calc_fc_adv(bp);
1967}
a2fbb9ea 1968
c18487ee
YR
1969static void bnx2x__link_reset(struct bnx2x *bp)
1970{
1971 bnx2x_phy_hw_lock(bp);
1972 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1973 bnx2x_phy_hw_unlock(bp);
1974}
a2fbb9ea 1975
c18487ee
YR
1976static u8 bnx2x_link_test(struct bnx2x *bp)
1977{
1978 u8 rc;
a2fbb9ea 1979
c18487ee
YR
1980 bnx2x_phy_hw_lock(bp);
1981 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1982 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 1983
c18487ee
YR
1984 return rc;
1985}
a2fbb9ea 1986
34f80b04
EG
1987/* Calculates the sum of vn_min_rates.
1988 It's needed for further normalizing of the min_rates.
1989
1990 Returns:
1991 sum of vn_min_rates
1992 or
1993 0 - if all the min_rates are 0.
 1994    In the latter case the fairness algorithm should be deactivated.
1995 If not all min_rates are zero then those that are zeroes will
1996 be set to 1.
1997 */
1998static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
1999{
2000 int i, port = BP_PORT(bp);
2001 u32 wsum = 0;
2002 int all_zero = 1;
2003
2004 for (i = 0; i < E1HVN_MAX; i++) {
2005 u32 vn_cfg =
2006 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2007 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2008 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2009 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2010 /* If min rate is zero - set it to 1 */
2011 if (!vn_min_rate)
2012 vn_min_rate = DEF_MIN_RATE;
2013 else
2014 all_zero = 0;
2015
2016 wsum += vn_min_rate;
2017 }
2018 }
2019
2020 /* ... only if all min rates are zeros - disable FAIRNESS */
2021 if (all_zero)
2022 return 0;
2023
2024 return wsum;
2025}
2026
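/* Illustrative example (added; the numbers are hypothetical, not read from
 * any real mf_cfg): with two visible functions on the port (hidden ones are
 * skipped) whose MIN_BW fields decode to 30 and 0, the loop above yields
 * vn_min_rate = 30 * 100 = 3000 for the first and substitutes DEF_MIN_RATE
 * for the second, so the returned wsum is 3000 + DEF_MIN_RATE and fairness
 * stays enabled.  Only if every visible function reports a zero MIN_BW does
 * the function return 0, which the caller treats as "disable fairness".
 */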
2027static void bnx2x_init_port_minmax(struct bnx2x *bp,
2028 int en_fness,
2029 u16 port_rate,
2030 struct cmng_struct_per_port *m_cmng_port)
2031{
2032 u32 r_param = port_rate / 8;
2033 int port = BP_PORT(bp);
2034 int i;
2035
2036 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2037
2038 /* Enable minmax only if we are in e1hmf mode */
2039 if (IS_E1HMF(bp)) {
2040 u32 fair_periodic_timeout_usec;
2041 u32 t_fair;
2042
2043 /* Enable rate shaping and fairness */
2044 m_cmng_port->flags.cmng_vn_enable = 1;
2045 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2046 m_cmng_port->flags.rate_shaping_enable = 1;
2047
2048 if (!en_fness)
 2049 			DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
 2050 			   " fairness will be disabled\n");
2051
2052 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2053 m_cmng_port->rs_vars.rs_periodic_timeout =
2054 RS_PERIODIC_TIMEOUT_USEC / 4;
2055
 2056 		/* this is the threshold below which no timer arming will occur;
 2057 		   the 1.25 coefficient makes the threshold a little bigger
 2058 		   than the real time, to compensate for timer inaccuracy */
2059 m_cmng_port->rs_vars.rs_threshold =
2060 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2061
2062 /* resolution of fairness timer */
2063 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2064 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2065 t_fair = T_FAIR_COEF / port_rate;
2066
2067 /* this is the threshold below which we won't arm
2068 the timer anymore */
2069 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2070
 2071 		/* we multiply by 1e3/8 to get bytes/msec.
 2072 		   We don't want the credit to exceed
 2073 		   T_FAIR*FAIR_MEM (the algorithm resolution) */
2074 m_cmng_port->fair_vars.upper_bound =
2075 r_param * t_fair * FAIR_MEM;
2076 /* since each tick is 4 usec */
2077 m_cmng_port->fair_vars.fairness_timeout =
2078 fair_periodic_timeout_usec / 4;
2079
2080 } else {
2081 /* Disable rate shaping and fairness */
2082 m_cmng_port->flags.cmng_vn_enable = 0;
2083 m_cmng_port->flags.fairness_enable = 0;
2084 m_cmng_port->flags.rate_shaping_enable = 0;
2085
2086 DP(NETIF_MSG_IFUP,
2087 "Single function mode minmax will be disabled\n");
2088 }
2089
2090 /* Store it to internal memory */
2091 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2092 REG_WR(bp, BAR_XSTRORM_INTMEM +
2093 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2094 ((u32 *)(m_cmng_port))[i]);
2095}
2096
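/* Worked numbers (added for clarity; the 100 usec period and the 10G/1G
 * figures are taken from the comments above, everything else follows from
 * them): at a 10000 Mbps port rate, r_param = 10000 / 8 = 1250 bytes/usec,
 * so rs_threshold = 100 * 1250 * 5 / 4 = 156250 bytes, and
 * t_fair = T_FAIR_COEF / 10000 corresponds to the quoted 1000 usec fairness
 * period for 10G (10000 usec for 1G).
 */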
2097static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2098 u32 wsum, u16 port_rate,
2099 struct cmng_struct_per_port *m_cmng_port)
2100{
2101 struct rate_shaping_vars_per_vn m_rs_vn;
2102 struct fairness_vars_per_vn m_fair_vn;
2103 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2104 u16 vn_min_rate, vn_max_rate;
2105 int i;
2106
2107 /* If function is hidden - set min and max to zeroes */
2108 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2109 vn_min_rate = 0;
2110 vn_max_rate = 0;
2111
2112 } else {
2113 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2114 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2115 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2116 if current min rate is zero - set it to 1.
 2117 		   This is a requirement of the algorithm. */
2118 if ((vn_min_rate == 0) && wsum)
2119 vn_min_rate = DEF_MIN_RATE;
2120 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2121 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2122 }
2123
2124 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2125 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2126
2127 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2128 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2129
2130 /* global vn counter - maximal Mbps for this vn */
2131 m_rs_vn.vn_counter.rate = vn_max_rate;
2132
2133 /* quota - number of bytes transmitted in this period */
2134 m_rs_vn.vn_counter.quota =
2135 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2136
2137#ifdef BNX2X_PER_PROT_QOS
2138 /* per protocol counter */
2139 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2140 /* maximal Mbps for this protocol */
2141 m_rs_vn.protocol_counters[protocol].rate =
2142 protocol_max_rate[protocol];
2143 /* the quota in each timer period -
2144 number of bytes transmitted in this period */
2145 m_rs_vn.protocol_counters[protocol].quota =
2146 (u32)(rs_periodic_timeout_usec *
2147 ((double)m_rs_vn.
2148 protocol_counters[protocol].rate/8));
2149 }
2150#endif
2151
2152 if (wsum) {
2153 /* credit for each period of the fairness algorithm:
 2154 		   number of bytes in T_FAIR (the vn's share of the port rate).
 2155 		   wsum should not be larger than 10000, thus
 2156 		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2157 m_fair_vn.vn_credit_delta =
2158 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2159 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2160 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2161 m_fair_vn.vn_credit_delta);
2162 }
2163
2164#ifdef BNX2X_PER_PROT_QOS
2165 do {
2166 u32 protocolWeightSum = 0;
2167
2168 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2169 protocolWeightSum +=
2170 drvInit.protocol_min_rate[protocol];
2171 /* per protocol counter -
2172 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2173 if (protocolWeightSum > 0) {
2174 for (protocol = 0;
2175 protocol < NUM_OF_PROTOCOLS; protocol++)
2176 /* credit for each period of the
2177 fairness algorithm - number of bytes in
2178 T_FAIR (the protocol share the vn rate) */
2179 m_fair_vn.protocol_credit_delta[protocol] =
2180 (u32)((vn_min_rate / 8) * t_fair *
2181 protocol_min_rate / protocolWeightSum);
2182 }
2183 } while (0);
2184#endif
2185
2186 /* Store it to internal memory */
2187 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2188 REG_WR(bp, BAR_XSTRORM_INTMEM +
2189 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2190 ((u32 *)(&m_rs_vn))[i]);
2191
2192 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2193 REG_WR(bp, BAR_XSTRORM_INTMEM +
2194 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2195 ((u32 *)(&m_fair_vn))[i]);
2196}
2197
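/* Illustrative arithmetic (added; the rates are hypothetical): if the port
 * runs at 10000 Mbps, wsum is 10000 and this vn advertises vn_min_rate =
 * 2500, then vn_min_rate * (T_FAIR_COEF / (8 * wsum)) grants the vn a
 * quarter of the port's byte budget for one T_FAIR period; the max() with
 * 2 * fair_threshold (i.e. 2 * QM_ARB_BYTES, set in
 * bnx2x_init_port_minmax()) only kicks in when the configured share is so
 * small that the credit would drop below two arbitration quanta.
 */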
c18487ee
YR
2198/* This function is called upon link interrupt */
2199static void bnx2x_link_attn(struct bnx2x *bp)
2200{
34f80b04
EG
2201 int vn;
2202
bb2a0f7a
YG
2203 /* Make sure that we are synced with the current statistics */
2204 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2205
c18487ee
YR
2206 bnx2x_phy_hw_lock(bp);
2207 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2208 bnx2x_phy_hw_unlock(bp);
a2fbb9ea 2209
bb2a0f7a
YG
2210 if (bp->link_vars.link_up) {
2211
2212 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2213 struct host_port_stats *pstats;
2214
2215 pstats = bnx2x_sp(bp, port_stats);
2216 /* reset old bmac stats */
2217 memset(&(pstats->mac_stx[0]), 0,
2218 sizeof(struct mac_stx));
2219 }
2220 if ((bp->state == BNX2X_STATE_OPEN) ||
2221 (bp->state == BNX2X_STATE_DISABLED))
2222 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2223 }
2224
c18487ee
YR
2225 /* indicate link status */
2226 bnx2x_link_report(bp);
34f80b04
EG
2227
2228 if (IS_E1HMF(bp)) {
2229 int func;
2230
2231 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2232 if (vn == BP_E1HVN(bp))
2233 continue;
2234
2235 func = ((vn << 1) | BP_PORT(bp));
2236
2237 /* Set the attention towards other drivers
2238 on the same port */
2239 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2240 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2241 }
2242 }
2243
2244 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2245 struct cmng_struct_per_port m_cmng_port;
2246 u32 wsum;
2247 int port = BP_PORT(bp);
2248
2249 /* Init RATE SHAPING and FAIRNESS contexts */
2250 wsum = bnx2x_calc_vn_wsum(bp);
2251 bnx2x_init_port_minmax(bp, (int)wsum,
2252 bp->link_vars.line_speed,
2253 &m_cmng_port);
2254 if (IS_E1HMF(bp))
2255 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2256 bnx2x_init_vn_minmax(bp, 2*vn + port,
2257 wsum, bp->link_vars.line_speed,
2258 &m_cmng_port);
2259 }
c18487ee 2260}
a2fbb9ea 2261
c18487ee
YR
2262static void bnx2x__link_status_update(struct bnx2x *bp)
2263{
2264 if (bp->state != BNX2X_STATE_OPEN)
2265 return;
a2fbb9ea 2266
c18487ee 2267 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2268
bb2a0f7a
YG
2269 if (bp->link_vars.link_up)
2270 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2271 else
2272 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2273
c18487ee
YR
2274 /* indicate link status */
2275 bnx2x_link_report(bp);
a2fbb9ea 2276}
a2fbb9ea 2277
34f80b04
EG
2278static void bnx2x_pmf_update(struct bnx2x *bp)
2279{
2280 int port = BP_PORT(bp);
2281 u32 val;
2282
2283 bp->port.pmf = 1;
2284 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2285
2286 /* enable nig attention */
2287 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2288 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2289 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2290
2291 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2292}
2293
c18487ee 2294/* end of Link */
a2fbb9ea
ET
2295
2296/* slow path */
2297
2298/*
2299 * General service functions
2300 */
2301
2302/* the slow path queue is odd since completions arrive on the fastpath ring */
2303static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2304 u32 data_hi, u32 data_lo, int common)
2305{
34f80b04 2306 int func = BP_FUNC(bp);
a2fbb9ea 2307
34f80b04
EG
2308 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2309 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2310 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2311 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2312 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2313
2314#ifdef BNX2X_STOP_ON_ERROR
2315 if (unlikely(bp->panic))
2316 return -EIO;
2317#endif
2318
34f80b04 2319 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2320
2321 if (!bp->spq_left) {
2322 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2323 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2324 bnx2x_panic();
2325 return -EBUSY;
2326 }
f1410647 2327
a2fbb9ea
ET
 2328 	/* CID needs the port number to be encoded in it */
2329 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2330 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2331 HW_CID(bp, cid)));
2332 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2333 if (common)
2334 bp->spq_prod_bd->hdr.type |=
2335 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2336
2337 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2338 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2339
2340 bp->spq_left--;
2341
2342 if (bp->spq_prod_bd == bp->spq_last_bd) {
2343 bp->spq_prod_bd = bp->spq;
2344 bp->spq_prod_idx = 0;
2345 DP(NETIF_MSG_TIMER, "end of spq\n");
2346
2347 } else {
2348 bp->spq_prod_bd++;
2349 bp->spq_prod_idx++;
2350 }
2351
34f80b04 2352 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2353 bp->spq_prod_idx);
2354
34f80b04 2355 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2356 return 0;
2357}
2358
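/* Usage sketch (added, mirroring the statistics code further down): a caller
 * hands bnx2x_sp_post() a command id, a connection id and a 64-bit data
 * cookie split into hi/lo halves, e.g.
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * and the completion for the ramrod later arrives on the fastpath ring, not
 * on this slow path queue.
 */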
2359/* acquire split MCP access lock register */
2360static int bnx2x_lock_alr(struct bnx2x *bp)
2361{
a2fbb9ea 2362 u32 i, j, val;
34f80b04 2363 int rc = 0;
a2fbb9ea
ET
2364
2365 might_sleep();
2366 i = 100;
2367 for (j = 0; j < i*10; j++) {
2368 val = (1UL << 31);
2369 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2370 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2371 if (val & (1L << 31))
2372 break;
2373
2374 msleep(5);
2375 }
a2fbb9ea
ET
2376 if (!(val & (1L << 31))) {
2377 BNX2X_ERR("Cannot acquire nvram interface\n");
a2fbb9ea
ET
2378 rc = -EBUSY;
2379 }
2380
2381 return rc;
2382}
2383
2384/* Release split MCP access lock register */
2385static void bnx2x_unlock_alr(struct bnx2x *bp)
2386{
2387 u32 val = 0;
2388
2389 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2390}
2391
2392static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2393{
2394 struct host_def_status_block *def_sb = bp->def_status_blk;
2395 u16 rc = 0;
2396
2397 barrier(); /* status block is written to by the chip */
2398
2399 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2400 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2401 rc |= 1;
2402 }
2403 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2404 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2405 rc |= 2;
2406 }
2407 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2408 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2409 rc |= 4;
2410 }
2411 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2412 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2413 rc |= 8;
2414 }
2415 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2416 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2417 rc |= 16;
2418 }
2419 return rc;
2420}
2421
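/* Added note: the value returned above is a bitmask of which default status
 * block indices moved since the last pass -- bit 0 for the attention block,
 * then bits 1..4 for the CSTORM, USTORM, XSTORM and TSTORM indices
 * respectively.  bnx2x_sp_task() below only dispatches on bit 0 (HW
 * attentions) and bit 1 (CStorm events); the other bits just lead to the
 * final status block acknowledgement.
 */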
2422/*
2423 * slow path service functions
2424 */
2425
2426static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2427{
34f80b04
EG
2428 int port = BP_PORT(bp);
2429 int func = BP_FUNC(bp);
2430 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
a2fbb9ea
ET
2431 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2432 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2433 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2434 NIG_REG_MASK_INTERRUPT_PORT0;
a2fbb9ea
ET
2435
2436 if (~bp->aeu_mask & (asserted & 0xff))
2437 BNX2X_ERR("IGU ERROR\n");
2438 if (bp->attn_state & asserted)
2439 BNX2X_ERR("IGU ERROR\n");
2440
2441 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2442 bp->aeu_mask, asserted);
2443 bp->aeu_mask &= ~(asserted & 0xff);
2444 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2445
2446 REG_WR(bp, aeu_addr, bp->aeu_mask);
2447
2448 bp->attn_state |= asserted;
2449
2450 if (asserted & ATTN_HARD_WIRED_MASK) {
2451 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2452
877e9aa4
ET
2453 /* save nig interrupt mask */
2454 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2455 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2456
c18487ee 2457 bnx2x_link_attn(bp);
a2fbb9ea
ET
2458
2459 /* handle unicore attn? */
2460 }
2461 if (asserted & ATTN_SW_TIMER_4_FUNC)
2462 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2463
2464 if (asserted & GPIO_2_FUNC)
2465 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2466
2467 if (asserted & GPIO_3_FUNC)
2468 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2469
2470 if (asserted & GPIO_4_FUNC)
2471 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2472
2473 if (port == 0) {
2474 if (asserted & ATTN_GENERAL_ATTN_1) {
2475 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2476 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2477 }
2478 if (asserted & ATTN_GENERAL_ATTN_2) {
2479 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2480 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2481 }
2482 if (asserted & ATTN_GENERAL_ATTN_3) {
2483 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2484 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2485 }
2486 } else {
2487 if (asserted & ATTN_GENERAL_ATTN_4) {
2488 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2489 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2490 }
2491 if (asserted & ATTN_GENERAL_ATTN_5) {
2492 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2493 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2494 }
2495 if (asserted & ATTN_GENERAL_ATTN_6) {
2496 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2497 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2498 }
2499 }
2500
2501 } /* if hardwired */
2502
2503 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2504 asserted, BAR_IGU_INTMEM + igu_addr);
2505 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2506
2507 /* now set back the mask */
2508 if (asserted & ATTN_NIG_FOR_FUNC)
877e9aa4 2509 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a2fbb9ea
ET
2510}
2511
877e9aa4 2512static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2513{
34f80b04 2514 int port = BP_PORT(bp);
877e9aa4
ET
2515 int reg_offset;
2516 u32 val;
2517
34f80b04
EG
2518 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2519 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2520
34f80b04 2521 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2522
2523 val = REG_RD(bp, reg_offset);
2524 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2525 REG_WR(bp, reg_offset, val);
2526
2527 BNX2X_ERR("SPIO5 hw attention\n");
2528
34f80b04 2529 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
877e9aa4
ET
2530 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2531 /* Fan failure attention */
2532
 2533 			/* The PHY reset is controlled by GPIO 1 */
2534 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2535 MISC_REGISTERS_GPIO_OUTPUT_LOW);
 2536 			/* Low power mode is controlled by GPIO 2 */
2537 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2538 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2539 /* mark the failure */
c18487ee 2540 bp->link_params.ext_phy_config &=
877e9aa4 2541 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2542 bp->link_params.ext_phy_config |=
877e9aa4
ET
2543 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2544 SHMEM_WR(bp,
2545 dev_info.port_hw_config[port].
2546 external_phy_config,
c18487ee 2547 bp->link_params.ext_phy_config);
877e9aa4
ET
2548 /* log the failure */
2549 printk(KERN_ERR PFX "Fan Failure on Network"
2550 " Controller %s has caused the driver to"
2551 " shutdown the card to prevent permanent"
2552 " damage. Please contact Dell Support for"
2553 " assistance\n", bp->dev->name);
2554 break;
2555
2556 default:
2557 break;
2558 }
2559 }
34f80b04
EG
2560
2561 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2562
2563 val = REG_RD(bp, reg_offset);
2564 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2565 REG_WR(bp, reg_offset, val);
2566
2567 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2568 (attn & HW_INTERRUT_ASSERT_SET_0));
2569 bnx2x_panic();
2570 }
877e9aa4
ET
2571}
2572
2573static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2574{
2575 u32 val;
2576
2577 if (attn & BNX2X_DOORQ_ASSERT) {
2578
2579 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2580 BNX2X_ERR("DB hw attention 0x%x\n", val);
2581 /* DORQ discard attention */
2582 if (val & 0x2)
2583 BNX2X_ERR("FATAL error from DORQ\n");
2584 }
34f80b04
EG
2585
2586 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2587
2588 int port = BP_PORT(bp);
2589 int reg_offset;
2590
2591 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2592 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2593
2594 val = REG_RD(bp, reg_offset);
2595 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2596 REG_WR(bp, reg_offset, val);
2597
2598 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2599 (attn & HW_INTERRUT_ASSERT_SET_1));
2600 bnx2x_panic();
2601 }
877e9aa4
ET
2602}
2603
2604static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2605{
2606 u32 val;
2607
2608 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2609
2610 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2611 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2612 /* CFC error attention */
2613 if (val & 0x2)
2614 BNX2X_ERR("FATAL error from CFC\n");
2615 }
2616
2617 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2618
2619 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2620 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2621 /* RQ_USDMDP_FIFO_OVERFLOW */
2622 if (val & 0x18000)
2623 BNX2X_ERR("FATAL error from PXP\n");
2624 }
34f80b04
EG
2625
2626 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2627
2628 int port = BP_PORT(bp);
2629 int reg_offset;
2630
2631 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2632 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2633
2634 val = REG_RD(bp, reg_offset);
2635 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2636 REG_WR(bp, reg_offset, val);
2637
2638 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2639 (attn & HW_INTERRUT_ASSERT_SET_2));
2640 bnx2x_panic();
2641 }
877e9aa4
ET
2642}
2643
2644static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2645{
34f80b04
EG
2646 u32 val;
2647
877e9aa4
ET
2648 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2649
34f80b04
EG
2650 if (attn & BNX2X_PMF_LINK_ASSERT) {
2651 int func = BP_FUNC(bp);
2652
2653 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2654 bnx2x__link_status_update(bp);
2655 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2656 DRV_STATUS_PMF)
2657 bnx2x_pmf_update(bp);
2658
2659 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2660
2661 BNX2X_ERR("MC assert!\n");
2662 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2663 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2664 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2665 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2666 bnx2x_panic();
2667
2668 } else if (attn & BNX2X_MCP_ASSERT) {
2669
2670 BNX2X_ERR("MCP assert!\n");
2671 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2672 bnx2x_fw_dump(bp);
877e9aa4
ET
2673
2674 } else
2675 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2676 }
2677
2678 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2679 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2680 if (attn & BNX2X_GRC_TIMEOUT) {
2681 val = CHIP_IS_E1H(bp) ?
2682 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2683 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2684 }
2685 if (attn & BNX2X_GRC_RSV) {
2686 val = CHIP_IS_E1H(bp) ?
2687 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2688 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2689 }
877e9aa4 2690 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2691 }
2692}
2693
2694static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2695{
a2fbb9ea
ET
2696 struct attn_route attn;
2697 struct attn_route group_mask;
34f80b04 2698 int port = BP_PORT(bp);
877e9aa4 2699 int index;
a2fbb9ea
ET
2700 u32 reg_addr;
2701 u32 val;
2702
2703 /* need to take HW lock because MCP or other port might also
2704 try to handle this event */
2705 bnx2x_lock_alr(bp);
2706
2707 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2708 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2709 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2710 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2711 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2712 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2713
2714 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2715 if (deasserted & (1 << index)) {
2716 group_mask = bp->attn_group[index];
2717
34f80b04
EG
2718 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2719 index, group_mask.sig[0], group_mask.sig[1],
2720 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2721
877e9aa4
ET
2722 bnx2x_attn_int_deasserted3(bp,
2723 attn.sig[3] & group_mask.sig[3]);
2724 bnx2x_attn_int_deasserted1(bp,
2725 attn.sig[1] & group_mask.sig[1]);
2726 bnx2x_attn_int_deasserted2(bp,
2727 attn.sig[2] & group_mask.sig[2]);
2728 bnx2x_attn_int_deasserted0(bp,
2729 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2730
a2fbb9ea
ET
2731 if ((attn.sig[0] & group_mask.sig[0] &
2732 HW_PRTY_ASSERT_SET_0) ||
2733 (attn.sig[1] & group_mask.sig[1] &
2734 HW_PRTY_ASSERT_SET_1) ||
2735 (attn.sig[2] & group_mask.sig[2] &
2736 HW_PRTY_ASSERT_SET_2))
877e9aa4 2737 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2738 }
2739 }
2740
2741 bnx2x_unlock_alr(bp);
2742
34f80b04 2743 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
a2fbb9ea
ET
2744
2745 val = ~deasserted;
34f80b04 2746/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
a2fbb9ea
ET
2747 val, BAR_IGU_INTMEM + reg_addr); */
2748 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2749
2750 if (bp->aeu_mask & (deasserted & 0xff))
34f80b04 2751 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea 2752 if (~bp->attn_state & deasserted)
34f80b04 2753 BNX2X_ERR("IGU BUG!\n");
a2fbb9ea
ET
2754
2755 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2756 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2757
2758 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2759 bp->aeu_mask |= (deasserted & 0xff);
2760
2761 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2762 REG_WR(bp, reg_addr, bp->aeu_mask);
2763
2764 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2765 bp->attn_state &= ~deasserted;
2766 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2767}
2768
2769static void bnx2x_attn_int(struct bnx2x *bp)
2770{
2771 /* read local copy of bits */
2772 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2773 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2774 u32 attn_state = bp->attn_state;
2775
2776 /* look for changed bits */
2777 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2778 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2779
2780 DP(NETIF_MSG_HW,
2781 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2782 attn_bits, attn_ack, asserted, deasserted);
2783
2784 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2785 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2786
2787 /* handle bits that were raised */
2788 if (asserted)
2789 bnx2x_attn_int_asserted(bp, asserted);
2790
2791 if (deasserted)
2792 bnx2x_attn_int_deasserted(bp, deasserted);
2793}
2794
2795static void bnx2x_sp_task(struct work_struct *work)
2796{
2797 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2798 u16 status;
2799
34f80b04 2800
a2fbb9ea
ET
2801 /* Return here if interrupt is disabled */
2802 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2803 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2804 return;
2805 }
2806
2807 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2808/* if (status == 0) */
2809/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2810
34f80b04 2811 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2812
877e9aa4
ET
2813 /* HW attentions */
2814 if (status & 0x1)
a2fbb9ea 2815 bnx2x_attn_int(bp);
a2fbb9ea 2816
bb2a0f7a
YG
2817 /* CStorm events: query_stats, port delete ramrod */
2818 if (status & 0x2)
2819 bp->stats_pending = 0;
2820
a2fbb9ea
ET
2821 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2822 IGU_INT_NOP, 1);
2823 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2824 IGU_INT_NOP, 1);
2825 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2826 IGU_INT_NOP, 1);
2827 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2828 IGU_INT_NOP, 1);
2829 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2830 IGU_INT_ENABLE, 1);
877e9aa4 2831
a2fbb9ea
ET
2832}
2833
2834static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2835{
2836 struct net_device *dev = dev_instance;
2837 struct bnx2x *bp = netdev_priv(dev);
2838
2839 /* Return here if interrupt is disabled */
2840 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 2841 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2842 return IRQ_HANDLED;
2843 }
2844
877e9aa4 2845 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2846
2847#ifdef BNX2X_STOP_ON_ERROR
2848 if (unlikely(bp->panic))
2849 return IRQ_HANDLED;
2850#endif
2851
2852 schedule_work(&bp->sp_task);
2853
2854 return IRQ_HANDLED;
2855}
2856
2857/* end of slow path */
2858
2859/* Statistics */
2860
2861/****************************************************************************
2862* Macros
2863****************************************************************************/
2864
a2fbb9ea
ET
2865/* sum[hi:lo] += add[hi:lo] */
2866#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2867 do { \
2868 s_lo += a_lo; \
 2869 		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2870 } while (0)
2871
2872/* difference = minuend - subtrahend */
2873#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2874 do { \
bb2a0f7a
YG
2875 if (m_lo < s_lo) { \
2876 /* underflow */ \
a2fbb9ea 2877 d_hi = m_hi - s_hi; \
bb2a0f7a
YG
2878 if (d_hi > 0) { \
2879 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2880 d_hi--; \
2881 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a
YG
2882 } else { \
2883 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2884 d_hi = 0; \
2885 d_lo = 0; \
2886 } \
bb2a0f7a
YG
2887 } else { \
2888 /* m_lo >= s_lo */ \
a2fbb9ea 2889 if (m_hi < s_hi) { \
bb2a0f7a
YG
2890 d_hi = 0; \
2891 d_lo = 0; \
2892 } else { \
2893 /* m_hi >= s_hi */ \
2894 d_hi = m_hi - s_hi; \
2895 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2896 } \
2897 } \
2898 } while (0)
2899
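/* Worked example (added; the values are arbitrary): with s_hi:s_lo =
 * 0x0:0xffffffff and a_hi:a_lo = 0x0:0x1, ADD_64 first wraps s_lo to 0x0;
 * the (s_lo < a_lo) carry is then 1, so s_hi becomes 0x1 and the pair
 * correctly represents 0x100000000.  DIFF_64 applies the mirrored borrow
 * logic and clamps the result to zero whenever the subtrahend exceeds the
 * minuend, rather than producing a huge wrapped difference.
 */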
bb2a0f7a 2900#define UPDATE_STAT64(s, t) \
a2fbb9ea 2901 do { \
bb2a0f7a
YG
2902 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2903 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2904 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2905 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2906 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2907 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2908 } while (0)
2909
bb2a0f7a 2910#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2911 do { \
bb2a0f7a
YG
2912 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2913 diff.lo, new->s##_lo, old->s##_lo); \
2914 ADD_64(estats->t##_hi, diff.hi, \
2915 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2916 } while (0)
2917
2918/* sum[hi:lo] += add */
2919#define ADD_EXTEND_64(s_hi, s_lo, a) \
2920 do { \
2921 s_lo += a; \
2922 s_hi += (s_lo < a) ? 1 : 0; \
2923 } while (0)
2924
bb2a0f7a 2925#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2926 do { \
bb2a0f7a
YG
2927 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2928 pstats->mac_stx[1].s##_lo, \
2929 new->s); \
a2fbb9ea
ET
2930 } while (0)
2931
bb2a0f7a 2932#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2933 do { \
2934 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2935 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
2936 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2937 } while (0)
2938
2939#define UPDATE_EXTEND_XSTAT(s, t) \
2940 do { \
2941 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2942 old_xclient->s = le32_to_cpu(xclient->s); \
2943 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
2944 } while (0)
2945
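/* Added note: the storm counters delivered by the firmware are only 32 bits
 * wide, so UPDATE_EXTEND_TSTAT/UPDATE_EXTEND_XSTAT never copy them directly;
 * they take the delta against the previous snapshot (unsigned arithmetic
 * absorbs a single 32-bit wrap) and fold it into the 64-bit host counter via
 * ADD_EXTEND_64.
 */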
2946/*
2947 * General service functions
2948 */
2949
2950static inline long bnx2x_hilo(u32 *hiref)
2951{
2952 u32 lo = *(hiref + 1);
2953#if (BITS_PER_LONG == 64)
2954 u32 hi = *hiref;
2955
2956 return HILO_U64(hi, lo);
2957#else
2958 return lo;
2959#endif
2960}
2961
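/* Example (added): bnx2x_hilo() expects a pointer to the _hi word of a
 * hi/lo counter pair stored as two consecutive u32s, e.g.
 * bnx2x_hilo(&estats->brb_drop_hi).  On a 64-bit kernel it returns the full
 * (hi << 32) | lo value; on 32-bit it simply returns the low word.
 */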
2962/*
2963 * Init service functions
2964 */
2965
bb2a0f7a
YG
2966static void bnx2x_storm_stats_init(struct bnx2x *bp)
2967{
2968 int func = BP_FUNC(bp);
2969
2970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2971 REG_WR(bp, BAR_XSTRORM_INTMEM +
2972 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2973
2974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2975 REG_WR(bp, BAR_TSTRORM_INTMEM +
2976 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2977
2978 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2979 REG_WR(bp, BAR_CSTRORM_INTMEM +
2980 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2981
2982 REG_WR(bp, BAR_XSTRORM_INTMEM +
2983 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2984 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2985 REG_WR(bp, BAR_XSTRORM_INTMEM +
2986 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2987 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2988
2989 REG_WR(bp, BAR_TSTRORM_INTMEM +
2990 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2991 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2992 REG_WR(bp, BAR_TSTRORM_INTMEM +
2993 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2994 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2995}
2996
2997static void bnx2x_storm_stats_post(struct bnx2x *bp)
2998{
2999 if (!bp->stats_pending) {
3000 struct eth_query_ramrod_data ramrod_data = {0};
3001 int rc;
3002
3003 ramrod_data.drv_counter = bp->stats_counter++;
3004 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3005 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3006
3007 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3008 ((u32 *)&ramrod_data)[1],
3009 ((u32 *)&ramrod_data)[0], 0);
3010 if (rc == 0) {
 3011 			/* stats ramrod has its own slot on the spq */
3012 bp->spq_left++;
3013 bp->stats_pending = 1;
3014 }
3015 }
3016}
3017
3018static void bnx2x_stats_init(struct bnx2x *bp)
3019{
3020 int port = BP_PORT(bp);
3021
3022 bp->executer_idx = 0;
3023 bp->stats_counter = 0;
3024
3025 /* port stats */
3026 if (!BP_NOMCP(bp))
3027 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3028 else
3029 bp->port.port_stx = 0;
3030 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3031
3032 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3033 bp->port.old_nig_stats.brb_discard =
3034 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3035 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3036 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3037 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3038 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3039
3040 /* function stats */
3041 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3042 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3043 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3044 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3045
3046 bp->stats_state = STATS_STATE_DISABLED;
3047 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3048 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3049}
3050
3051static void bnx2x_hw_stats_post(struct bnx2x *bp)
3052{
3053 struct dmae_command *dmae = &bp->stats_dmae;
3054 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3055
3056 *stats_comp = DMAE_COMP_VAL;
3057
3058 /* loader */
3059 if (bp->executer_idx) {
3060 int loader_idx = PMF_DMAE_C(bp);
3061
3062 memset(dmae, 0, sizeof(struct dmae_command));
3063
3064 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3065 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3066 DMAE_CMD_DST_RESET |
3067#ifdef __BIG_ENDIAN
3068 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3069#else
3070 DMAE_CMD_ENDIANITY_DW_SWAP |
3071#endif
3072 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3073 DMAE_CMD_PORT_0) |
3074 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3075 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3076 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3077 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3078 sizeof(struct dmae_command) *
3079 (loader_idx + 1)) >> 2;
3080 dmae->dst_addr_hi = 0;
3081 dmae->len = sizeof(struct dmae_command) >> 2;
3082 if (CHIP_IS_E1(bp))
3083 dmae->len--;
3084 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3085 dmae->comp_addr_hi = 0;
3086 dmae->comp_val = 1;
3087
3088 *stats_comp = 0;
3089 bnx2x_post_dmae(bp, dmae, loader_idx);
3090
3091 } else if (bp->func_stx) {
3092 *stats_comp = 0;
3093 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3094 }
3095}
3096
3097static int bnx2x_stats_comp(struct bnx2x *bp)
3098{
3099 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3100 int cnt = 10;
3101
3102 might_sleep();
3103 while (*stats_comp != DMAE_COMP_VAL) {
3104 msleep(1);
3105 if (!cnt) {
3106 BNX2X_ERR("timeout waiting for stats finished\n");
3107 break;
3108 }
3109 cnt--;
3110 }
3111 return 1;
3112}
3113
3114/*
3115 * Statistics service functions
3116 */
3117
3118static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3119{
3120 struct dmae_command *dmae;
3121 u32 opcode;
3122 int loader_idx = PMF_DMAE_C(bp);
3123 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3124
3125 /* sanity */
3126 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3127 BNX2X_ERR("BUG!\n");
3128 return;
3129 }
3130
3131 bp->executer_idx = 0;
3132
3133 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3134 DMAE_CMD_C_ENABLE |
3135 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3136#ifdef __BIG_ENDIAN
3137 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138#else
3139 DMAE_CMD_ENDIANITY_DW_SWAP |
3140#endif
3141 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3142 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3143
3144 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3145 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3146 dmae->src_addr_lo = bp->port.port_stx >> 2;
3147 dmae->src_addr_hi = 0;
3148 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3149 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3150 dmae->len = DMAE_LEN32_RD_MAX;
3151 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3152 dmae->comp_addr_hi = 0;
3153 dmae->comp_val = 1;
3154
3155 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3156 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3157 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3158 dmae->src_addr_hi = 0;
7a9b2557
VZ
3159 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3160 DMAE_LEN32_RD_MAX * 4);
3161 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3162 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3163 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3164 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3165 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3166 dmae->comp_val = DMAE_COMP_VAL;
3167
3168 *stats_comp = 0;
3169 bnx2x_hw_stats_post(bp);
3170 bnx2x_stats_comp(bp);
3171}
3172
3173static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3174{
3175 struct dmae_command *dmae;
34f80b04 3176 int port = BP_PORT(bp);
bb2a0f7a 3177 int vn = BP_E1HVN(bp);
a2fbb9ea 3178 u32 opcode;
bb2a0f7a 3179 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3180 u32 mac_addr;
bb2a0f7a
YG
3181 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3182
3183 /* sanity */
3184 if (!bp->link_vars.link_up || !bp->port.pmf) {
3185 BNX2X_ERR("BUG!\n");
3186 return;
3187 }
a2fbb9ea
ET
3188
3189 bp->executer_idx = 0;
bb2a0f7a
YG
3190
3191 /* MCP */
3192 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3193 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3194 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3195#ifdef __BIG_ENDIAN
bb2a0f7a 3196 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3197#else
bb2a0f7a 3198 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3199#endif
bb2a0f7a
YG
3200 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3201 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3202
bb2a0f7a 3203 if (bp->port.port_stx) {
a2fbb9ea
ET
3204
3205 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3206 dmae->opcode = opcode;
bb2a0f7a
YG
3207 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3208 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3209 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3210 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3211 dmae->len = sizeof(struct host_port_stats) >> 2;
3212 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3213 dmae->comp_addr_hi = 0;
3214 dmae->comp_val = 1;
a2fbb9ea
ET
3215 }
3216
bb2a0f7a
YG
3217 if (bp->func_stx) {
3218
3219 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3220 dmae->opcode = opcode;
3221 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3222 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3223 dmae->dst_addr_lo = bp->func_stx >> 2;
3224 dmae->dst_addr_hi = 0;
3225 dmae->len = sizeof(struct host_func_stats) >> 2;
3226 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3227 dmae->comp_addr_hi = 0;
3228 dmae->comp_val = 1;
a2fbb9ea
ET
3229 }
3230
bb2a0f7a 3231 /* MAC */
a2fbb9ea
ET
3232 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3233 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3234 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3235#ifdef __BIG_ENDIAN
3236 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3237#else
3238 DMAE_CMD_ENDIANITY_DW_SWAP |
3239#endif
bb2a0f7a
YG
3240 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3241 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3242
c18487ee 3243 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3244
3245 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3246 NIG_REG_INGRESS_BMAC0_MEM);
3247
3248 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3249 BIGMAC_REGISTER_TX_STAT_GTBYT */
3250 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3251 dmae->opcode = opcode;
3252 dmae->src_addr_lo = (mac_addr +
3253 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3254 dmae->src_addr_hi = 0;
3255 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3256 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3257 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3258 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3259 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3260 dmae->comp_addr_hi = 0;
3261 dmae->comp_val = 1;
3262
3263 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3264 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266 dmae->opcode = opcode;
3267 dmae->src_addr_lo = (mac_addr +
3268 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3269 dmae->src_addr_hi = 0;
3270 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3271 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3272 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3273 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3274 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3275 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3276 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3277 dmae->comp_addr_hi = 0;
3278 dmae->comp_val = 1;
3279
c18487ee 3280 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3281
3282 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3283
3284 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286 dmae->opcode = opcode;
3287 dmae->src_addr_lo = (mac_addr +
3288 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3289 dmae->src_addr_hi = 0;
3290 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3291 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3292 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3293 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3294 dmae->comp_addr_hi = 0;
3295 dmae->comp_val = 1;
3296
3297 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3298 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3299 dmae->opcode = opcode;
3300 dmae->src_addr_lo = (mac_addr +
3301 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3302 dmae->src_addr_hi = 0;
3303 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3304 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3305 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3306 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3307 dmae->len = 1;
3308 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3309 dmae->comp_addr_hi = 0;
3310 dmae->comp_val = 1;
3311
3312 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3313 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3314 dmae->opcode = opcode;
3315 dmae->src_addr_lo = (mac_addr +
3316 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3317 dmae->src_addr_hi = 0;
3318 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3319 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3320 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3321 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3322 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3323 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3324 dmae->comp_addr_hi = 0;
3325 dmae->comp_val = 1;
3326 }
3327
3328 /* NIG */
bb2a0f7a
YG
3329 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3330 dmae->opcode = opcode;
3331 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3332 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3333 dmae->src_addr_hi = 0;
3334 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3335 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3336 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3337 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3338 dmae->comp_addr_hi = 0;
3339 dmae->comp_val = 1;
3340
3341 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3342 dmae->opcode = opcode;
3343 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3344 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3345 dmae->src_addr_hi = 0;
3346 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3347 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3348 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3349 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3350 dmae->len = (2*sizeof(u32)) >> 2;
3351 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 dmae->comp_addr_hi = 0;
3353 dmae->comp_val = 1;
3354
a2fbb9ea
ET
3355 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3356 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3357 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3358 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3359#ifdef __BIG_ENDIAN
3360 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3361#else
3362 DMAE_CMD_ENDIANITY_DW_SWAP |
3363#endif
bb2a0f7a
YG
3364 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3365 (vn << DMAE_CMD_E1HVN_SHIFT));
3366 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3367 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3368 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3369 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3370 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3371 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3372 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3373 dmae->len = (2*sizeof(u32)) >> 2;
3374 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3375 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3376 dmae->comp_val = DMAE_COMP_VAL;
3377
3378 *stats_comp = 0;
a2fbb9ea
ET
3379}
3380
bb2a0f7a 3381static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3382{
bb2a0f7a
YG
3383 struct dmae_command *dmae = &bp->stats_dmae;
3384 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3385
bb2a0f7a
YG
3386 /* sanity */
3387 if (!bp->func_stx) {
3388 BNX2X_ERR("BUG!\n");
3389 return;
3390 }
a2fbb9ea 3391
bb2a0f7a
YG
3392 bp->executer_idx = 0;
3393 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3394
bb2a0f7a
YG
3395 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3396 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3397 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3398#ifdef __BIG_ENDIAN
3399 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3400#else
3401 DMAE_CMD_ENDIANITY_DW_SWAP |
3402#endif
3403 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3404 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3405 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3406 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3407 dmae->dst_addr_lo = bp->func_stx >> 2;
3408 dmae->dst_addr_hi = 0;
3409 dmae->len = sizeof(struct host_func_stats) >> 2;
3410 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3411 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3412 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3413
bb2a0f7a
YG
3414 *stats_comp = 0;
3415}
a2fbb9ea 3416
bb2a0f7a
YG
3417static void bnx2x_stats_start(struct bnx2x *bp)
3418{
3419 if (bp->port.pmf)
3420 bnx2x_port_stats_init(bp);
3421
3422 else if (bp->func_stx)
3423 bnx2x_func_stats_init(bp);
3424
3425 bnx2x_hw_stats_post(bp);
3426 bnx2x_storm_stats_post(bp);
3427}
3428
3429static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3430{
3431 bnx2x_stats_comp(bp);
3432 bnx2x_stats_pmf_update(bp);
3433 bnx2x_stats_start(bp);
3434}
3435
3436static void bnx2x_stats_restart(struct bnx2x *bp)
3437{
3438 bnx2x_stats_comp(bp);
3439 bnx2x_stats_start(bp);
3440}
3441
3442static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3443{
3444 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3445 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3446 struct regpair diff;
3447
3448 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3449 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3450 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3451 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3452 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3453 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3455 UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
3456 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3457 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3458 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3459 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3460 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3461 UPDATE_STAT64(tx_stat_gt127,
3462 tx_stat_etherstatspkts65octetsto127octets);
3463 UPDATE_STAT64(tx_stat_gt255,
3464 tx_stat_etherstatspkts128octetsto255octets);
3465 UPDATE_STAT64(tx_stat_gt511,
3466 tx_stat_etherstatspkts256octetsto511octets);
3467 UPDATE_STAT64(tx_stat_gt1023,
3468 tx_stat_etherstatspkts512octetsto1023octets);
3469 UPDATE_STAT64(tx_stat_gt1518,
3470 tx_stat_etherstatspkts1024octetsto1522octets);
3471 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3472 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3473 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3474 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3475 UPDATE_STAT64(tx_stat_gterr,
3476 tx_stat_dot3statsinternalmactransmiterrors);
3477 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3478}
3479
3480static void bnx2x_emac_stats_update(struct bnx2x *bp)
3481{
3482 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3483 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3484
3485 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3486 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3487 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3488 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3489 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3490 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3491 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3492 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3493 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3494 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3495 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3496 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3497 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3498 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3499 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3500 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3501 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3502 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3504 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3505 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3506 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3507 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3513 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3514 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3516}
3517
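/*
 * UPDATE_STAT64 / UPDATE_EXTEND_STAT above are helper macros from the driver
 * headers; they presumably take the difference between the freshly DMAE'd
 * BigMAC/EMAC counters and the previous snapshot and accumulate it into the
 * 64-bit hi/lo pairs of the host port statistics.  The mapping here is from
 * MAC register names to the standard MIB-style counter names.
 */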
3518static int bnx2x_hw_stats_update(struct bnx2x *bp)
3519{
3520 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3521 struct nig_stats *old = &(bp->port.old_nig_stats);
3522 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3523 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3524 struct regpair diff;
3525
3526 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3527 bnx2x_bmac_stats_update(bp);
3528
3529 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3530 bnx2x_emac_stats_update(bp);
3531
3532 else { /* unreached */
3533 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3534 return -1;
3535 }
a2fbb9ea 3536
3537 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3538 new->brb_discard - old->brb_discard);
a2fbb9ea 3539
3540 UPDATE_STAT64_NIG(egress_mac_pkt0,
3541 etherstatspkts1024octetsto1522octets);
3542 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3543
bb2a0f7a 3544 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3545
3546 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3547 sizeof(struct mac_stx));
3548 estats->brb_drop_hi = pstats->brb_drop_hi;
3549 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3550
bb2a0f7a 3551 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3552
bb2a0f7a 3553 return 0;
3554}
3555
bb2a0f7a 3556static int bnx2x_storm_stats_update(struct bnx2x *bp)
3557{
3558 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3559 int cl_id = BP_CL_ID(bp);
3560 struct tstorm_per_port_stats *tport =
3561 &stats->tstorm_common.port_statistics;
a2fbb9ea 3562 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3563 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3564 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3565 struct xstorm_per_client_stats *xclient =
3566 &stats->xstorm_common.client_statistics[cl_id];
3567 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3568 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3569 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3570 u32 diff;
3571
3572 /* are storm stats valid? */
3573 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3574 bp->stats_counter) {
3575 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3576 " tstorm counter (%d) != stats_counter (%d)\n",
3577 tclient->stats_counter, bp->stats_counter);
3578 return -1;
3579 }
3580 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3581 bp->stats_counter) {
3582 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3583 " xstorm counter (%d) != stats_counter (%d)\n",
3584 xclient->stats_counter, bp->stats_counter);
3585 return -2;
3586 }
a2fbb9ea 3587
3588 fstats->total_bytes_received_hi =
3589 fstats->valid_bytes_received_hi =
a2fbb9ea 3590 le32_to_cpu(tclient->total_rcv_bytes.hi);
3591 fstats->total_bytes_received_lo =
3592 fstats->valid_bytes_received_lo =
a2fbb9ea 3593 le32_to_cpu(tclient->total_rcv_bytes.lo);
3594
3595 estats->error_bytes_received_hi =
3596 le32_to_cpu(tclient->rcv_error_bytes.hi);
3597 estats->error_bytes_received_lo =
3598 le32_to_cpu(tclient->rcv_error_bytes.lo);
3599 ADD_64(estats->error_bytes_received_hi,
3600 estats->rx_stat_ifhcinbadoctets_hi,
3601 estats->error_bytes_received_lo,
3602 estats->rx_stat_ifhcinbadoctets_lo);
3603
3604 ADD_64(fstats->total_bytes_received_hi,
3605 estats->error_bytes_received_hi,
3606 fstats->total_bytes_received_lo,
3607 estats->error_bytes_received_lo);
3608
3609 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3610 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3611 total_multicast_packets_received);
a2fbb9ea 3612 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3613 total_broadcast_packets_received);
3614
3615 fstats->total_bytes_transmitted_hi =
3616 le32_to_cpu(xclient->total_sent_bytes.hi);
3617 fstats->total_bytes_transmitted_lo =
3618 le32_to_cpu(xclient->total_sent_bytes.lo);
3619
3620 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3621 total_unicast_packets_transmitted);
3622 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3623 total_multicast_packets_transmitted);
3624 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3625 total_broadcast_packets_transmitted);
3626
3627 memcpy(estats, &(fstats->total_bytes_received_hi),
3628 sizeof(struct host_func_stats) - 2*sizeof(u32));
3629
3630 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3631 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3632 estats->brb_truncate_discard =
3633 le32_to_cpu(tport->brb_truncate_discard);
3634 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3635
3636 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3637 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3638 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3639 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3640 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3641 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3642 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3643 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3644 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3645 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3646 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3647 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3648 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3649
3650 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3651 old_tclient->packets_too_big_discard =
a2fbb9ea 3652 le32_to_cpu(tclient->packets_too_big_discard);
3653 estats->no_buff_discard =
3654 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3655 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3656
3657 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3658 old_xclient->unicast_bytes_sent.hi =
3659 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3660 old_xclient->unicast_bytes_sent.lo =
3661 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3662 old_xclient->multicast_bytes_sent.hi =
3663 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3664 old_xclient->multicast_bytes_sent.lo =
3665 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3666 old_xclient->broadcast_bytes_sent.hi =
3667 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3668 old_xclient->broadcast_bytes_sent.lo =
3669 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3670
3671 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3672
3673 return 0;
3674}
3675
bb2a0f7a 3676static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3677{
3678 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3679 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3680 struct net_device_stats *nstats = &bp->dev->stats;
3681
3682 nstats->rx_packets =
3683 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3684 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3685 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3686
3687 nstats->tx_packets =
3688 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3689 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3690 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3691
bb2a0f7a 3692 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3693
0e39e645 3694 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3695
3696 nstats->rx_dropped = old_tclient->checksum_discard +
3697 estats->mac_discard;
3698 nstats->tx_dropped = 0;
3699
3700 nstats->multicast =
3701 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3702
3703 nstats->collisions =
3704 estats->tx_stat_dot3statssinglecollisionframes_lo +
3705 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3706 estats->tx_stat_dot3statslatecollisions_lo +
3707 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3708
3709 estats->jabber_packets_received =
3710 old_tclient->packets_too_big_discard +
3711 estats->rx_stat_dot3statsframestoolong_lo;
3712
3713 nstats->rx_length_errors =
3714 estats->rx_stat_etherstatsundersizepkts_lo +
3715 estats->jabber_packets_received;
3716 nstats->rx_over_errors = estats->brb_drop_lo +
0e39e645 3717 estats->brb_truncate_discard;
3718 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3719 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3720 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3721 nstats->rx_missed_errors = estats->xxoverflow_discard;
3722
3723 nstats->rx_errors = nstats->rx_length_errors +
3724 nstats->rx_over_errors +
3725 nstats->rx_crc_errors +
3726 nstats->rx_frame_errors +
3727 nstats->rx_fifo_errors +
3728 nstats->rx_missed_errors;
a2fbb9ea 3729
3730 nstats->tx_aborted_errors =
3731 estats->tx_stat_dot3statslatecollisions_lo +
3732 estats->tx_stat_dot3statsexcessivecollisions_lo;
3733 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3734 nstats->tx_fifo_errors = 0;
3735 nstats->tx_heartbeat_errors = 0;
3736 nstats->tx_window_errors = 0;
3737
3738 nstats->tx_errors = nstats->tx_aborted_errors +
3739 nstats->tx_carrier_errors;
3740}
3741
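/*
 * bnx2x_stats_update() below is driven from the periodic timer via
 * STATS_EVENT_UPDATE.  It first checks the DMAE completion word (stats_comp)
 * so partially transferred data is never consumed, folds in the MAC/NIG
 * counters (PMF only) and the storm counters, and only then refreshes the
 * netdev counters.  Roughly three consecutive ticks without fresh storm
 * statistics are treated as a firmware failure and end in bnx2x_panic().
 */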
bb2a0f7a 3742static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3743{
3744 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3745 int update = 0;
a2fbb9ea 3746
3747 if (*stats_comp != DMAE_COMP_VAL)
3748 return;
3749
3750 if (bp->port.pmf)
3751 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3752
bb2a0f7a 3753 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3754
3755 if (update)
3756 bnx2x_net_stats_update(bp);
a2fbb9ea 3757
3758 else {
3759 if (bp->stats_pending) {
3760 bp->stats_pending++;
3761 if (bp->stats_pending == 3) {
3762 BNX2X_ERR("stats not updated for 3 consecutive timer ticks\n");
3763 bnx2x_panic();
3764 return;
3765 }
3766 }
3767 }
3768
3769 if (bp->msglevel & NETIF_MSG_TIMER) {
3770 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3771 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3772 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3773 int i;
3774
3775 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3776 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3777 " tx pkt (%lx)\n",
3778 bnx2x_tx_avail(bp->fp),
7a9b2557 3779 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3780 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3781 " rx pkt (%lx)\n",
3782 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3783 bp->fp->rx_comp_cons),
3784 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3785 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3786 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3787 estats->driver_xoff, estats->brb_drop_lo);
3788 printk(KERN_DEBUG "tstats: checksum_discard %u "
3789 "packets_too_big_discard %u no_buff_discard %u "
3790 "mac_discard %u mac_filter_discard %u "
3791 "xxovrflow_discard %u brb_truncate_discard %u "
3792 "ttl0_discard %u\n",
3793 old_tclient->checksum_discard,
3794 old_tclient->packets_too_big_discard,
3795 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3796 estats->mac_filter_discard, estats->xxoverflow_discard,
3797 estats->brb_truncate_discard,
3798 old_tclient->ttl0_discard);
3799
3800 for_each_queue(bp, i) {
3801 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3802 bnx2x_fp(bp, i, tx_pkt),
3803 bnx2x_fp(bp, i, rx_pkt),
3804 bnx2x_fp(bp, i, rx_calls));
3805 }
3806 }
3807
3808 bnx2x_hw_stats_post(bp);
3809 bnx2x_storm_stats_post(bp);
3810}
a2fbb9ea 3811
3812static void bnx2x_port_stats_stop(struct bnx2x *bp)
3813{
3814 struct dmae_command *dmae;
3815 u32 opcode;
3816 int loader_idx = PMF_DMAE_C(bp);
3817 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3818
bb2a0f7a 3819 bp->executer_idx = 0;
a2fbb9ea 3820
3821 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3822 DMAE_CMD_C_ENABLE |
3823 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3824#ifdef __BIG_ENDIAN
bb2a0f7a 3825 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3826#else
bb2a0f7a 3827 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3828#endif
3829 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3830 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3831
3832 if (bp->port.port_stx) {
3833
3834 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3835 if (bp->func_stx)
3836 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3837 else
3838 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3839 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3840 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3841 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3842 dmae->dst_addr_hi = 0;
3843 dmae->len = sizeof(struct host_port_stats) >> 2;
3844 if (bp->func_stx) {
3845 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3846 dmae->comp_addr_hi = 0;
3847 dmae->comp_val = 1;
3848 } else {
3849 dmae->comp_addr_lo =
3850 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3851 dmae->comp_addr_hi =
3852 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3853 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3854
3855 *stats_comp = 0;
3856 }
3857 }
3858
3859 if (bp->func_stx) {
3860
3861 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3862 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3863 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3864 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3865 dmae->dst_addr_lo = bp->func_stx >> 2;
3866 dmae->dst_addr_hi = 0;
3867 dmae->len = sizeof(struct host_func_stats) >> 2;
3868 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3869 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3870 dmae->comp_val = DMAE_COMP_VAL;
3871
3872 *stats_comp = 0;
a2fbb9ea 3873 }
3874}
3875
3876static void bnx2x_stats_stop(struct bnx2x *bp)
3877{
3878 int update = 0;
3879
3880 bnx2x_stats_comp(bp);
3881
3882 if (bp->port.pmf)
3883 update = (bnx2x_hw_stats_update(bp) == 0);
3884
3885 update |= (bnx2x_storm_stats_update(bp) == 0);
3886
3887 if (update) {
3888 bnx2x_net_stats_update(bp);
a2fbb9ea 3889
3890 if (bp->port.pmf)
3891 bnx2x_port_stats_stop(bp);
3892
3893 bnx2x_hw_stats_post(bp);
3894 bnx2x_stats_comp(bp);
3895 }
3896}
3897
3898static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3899{
3900}
3901
3902static const struct {
3903 void (*action)(struct bnx2x *bp);
3904 enum bnx2x_stats_state next_state;
3905} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3906/* state event */
3907{
3908/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3909/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3910/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3911/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3912},
3913{
3914/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3915/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3916/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3917/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3918}
3919};
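/*
 * The table above is a small state machine: it is indexed by the current
 * statistics state (DISABLED/ENABLED) and the incoming event (PMF, LINK_UP,
 * UPDATE, STOP); each cell names the handler to run and the next state.
 * For example, the periodic timer feeds it with
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * which is a no-op while statistics are disabled and a full update while
 * they are enabled.
 */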
3920
3921static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3922{
3923 enum bnx2x_stats_state state = bp->stats_state;
3924
3925 bnx2x_stats_stm[state][event].action(bp);
3926 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3927
3928 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3929 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3930 state, event, bp->stats_state);
3931}
3932
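/*
 * Periodic per-device timer: when the "poll" module parameter is set it
 * services the first fastpath TX/RX rings directly, it maintains the
 * driver<->MCP heartbeat (the pulse sequence numbers exchanged through the
 * shared-memory mailbox may differ by at most 1), and it triggers a
 * statistics update while the device is up.  It rearms itself with
 * mod_timer() at bp->current_interval.
 */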
3933static void bnx2x_timer(unsigned long data)
3934{
3935 struct bnx2x *bp = (struct bnx2x *) data;
3936
3937 if (!netif_running(bp->dev))
3938 return;
3939
3940 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3941 goto timer_restart;
3942
3943 if (poll) {
3944 struct bnx2x_fastpath *fp = &bp->fp[0];
3945 int rc;
3946
3947 bnx2x_tx_int(fp, 1000);
3948 rc = bnx2x_rx_int(fp, 1000);
3949 }
3950
3951 if (!BP_NOMCP(bp)) {
3952 int func = BP_FUNC(bp);
3953 u32 drv_pulse;
3954 u32 mcp_pulse;
3955
3956 ++bp->fw_drv_pulse_wr_seq;
3957 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3958 /* TBD - add SYSTEM_TIME */
3959 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3960 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3961
34f80b04 3962 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3963 MCP_PULSE_SEQ_MASK);
3964 /* The delta between driver pulse and mcp response
3965 * should be 1 (before mcp response) or 0 (after mcp response)
3966 */
3967 if ((drv_pulse != mcp_pulse) &&
3968 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3969 /* someone lost a heartbeat... */
3970 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3971 drv_pulse, mcp_pulse);
3972 }
3973 }
3974
3975 if ((bp->state == BNX2X_STATE_OPEN) ||
3976 (bp->state == BNX2X_STATE_DISABLED))
3977 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3978
f1410647 3979timer_restart:
3980 mod_timer(&bp->timer, jiffies + bp->current_interval);
3981}
3982
3983/* end of Statistics */
3984
3985/* nic init */
3986
3987/*
3988 * nic init service functions
3989 */
3990
34f80b04 3991static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 3992{
3993 int port = BP_PORT(bp);
3994
3995 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3996 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3997 sizeof(struct ustorm_def_status_block)/4);
3998 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3999 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4000 sizeof(struct cstorm_def_status_block)/4);
4001}
4002
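/*
 * Fastpath status block init: each SB has a USTORM section (RX indices) and
 * a CSTORM section (TX indices).  The host DMA address of every section is
 * written into the corresponding storm's internal memory, all host
 * coalescing indices start out disabled (the loops below write 1 to the
 * HC_DISABLE offsets), and the block is finally acked with IGU_INT_ENABLE.
 */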
4003static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
4004 struct host_status_block *sb, dma_addr_t mapping)
4005{
4006 int port = BP_PORT(bp);
bb2a0f7a 4007 int func = BP_FUNC(bp);
a2fbb9ea 4008 int index;
34f80b04 4009 u64 section;
4010
4011 /* USTORM */
4012 section = ((u64)mapping) + offsetof(struct host_status_block,
4013 u_status_block);
34f80b04 4014 sb->u_status_block.status_block_id = sb_id;
4015
4016 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4017 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4018 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4019 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4020 U64_HI(section));
4021 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4022 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4023
4024 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4025 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4026 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4027
4028 /* CSTORM */
4029 section = ((u64)mapping) + offsetof(struct host_status_block,
4030 c_status_block);
34f80b04 4031 sb->c_status_block.status_block_id = sb_id;
4032
4033 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4034 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4035 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4036 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4037 U64_HI(section));
4038 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4039 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4040
4041 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4042 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4043 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4044
4045 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4046}
4047
4048static void bnx2x_zero_def_sb(struct bnx2x *bp)
4049{
4050 int func = BP_FUNC(bp);
a2fbb9ea 4051
4052 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4053 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054 sizeof(struct ustorm_def_status_block)/4);
4055 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4056 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4057 sizeof(struct cstorm_def_status_block)/4);
4058 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4059 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4060 sizeof(struct xstorm_def_status_block)/4);
4061 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4062 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4063 sizeof(struct tstorm_def_status_block)/4);
4064}
4065
4066static void bnx2x_init_def_sb(struct bnx2x *bp,
4067 struct host_def_status_block *def_sb,
34f80b04 4068 dma_addr_t mapping, int sb_id)
a2fbb9ea 4069{
4070 int port = BP_PORT(bp);
4071 int func = BP_FUNC(bp);
4072 int index, val, reg_offset;
4073 u64 section;
4074
4075 /* ATTN */
4076 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4077 atten_status_block);
34f80b04 4078 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4079
4080 bp->def_att_idx = 0;
4081 bp->attn_state = 0;
4082
4083 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4084 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4085
34f80b04 4086 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4087 bp->attn_group[index].sig[0] = REG_RD(bp,
4088 reg_offset + 0x10*index);
4089 bp->attn_group[index].sig[1] = REG_RD(bp,
4090 reg_offset + 0x4 + 0x10*index);
4091 bp->attn_group[index].sig[2] = REG_RD(bp,
4092 reg_offset + 0x8 + 0x10*index);
4093 bp->attn_group[index].sig[3] = REG_RD(bp,
4094 reg_offset + 0xc + 0x10*index);
4095 }
4096
4097 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4098 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4099
4100 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4101 HC_REG_ATTN_MSG0_ADDR_L);
4102
4103 REG_WR(bp, reg_offset, U64_LO(section));
4104 REG_WR(bp, reg_offset + 4, U64_HI(section));
4105
4106 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4107
4108 val = REG_RD(bp, reg_offset);
34f80b04 4109 val |= sb_id;
4110 REG_WR(bp, reg_offset, val);
4111
4112 /* USTORM */
4113 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4114 u_def_status_block);
34f80b04 4115 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4116
4117 bp->def_u_idx = 0;
4118
a2fbb9ea 4119 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4120 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4121 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4122 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4123 U64_HI(section));
4124 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4125 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4126 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4127 BNX2X_BTR);
4128
4129 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4130 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4131 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4132
4133 /* CSTORM */
4134 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135 c_def_status_block);
34f80b04 4136 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea 4137
4138 bp->def_c_idx = 0;
4139
a2fbb9ea 4140 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4141 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4142 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4143 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4144 U64_HI(section));
4145 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4146 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4147 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4148 BNX2X_BTR);
4149
4150 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4151 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4152 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4153
4154 /* TSTORM */
4155 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4156 t_def_status_block);
34f80b04 4157 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea 4158
4159 bp->def_t_idx = 0;
4160
a2fbb9ea 4161 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4162 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4163 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4164 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4165 U64_HI(section));
4166 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4167 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4168 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4169 BNX2X_BTR);
4170
4171 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4173 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4174
4175 /* XSTORM */
4176 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4177 x_def_status_block);
34f80b04 4178 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea 4179
4180 bp->def_x_idx = 0;
4181
a2fbb9ea 4182 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4183 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4184 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4185 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4186 U64_HI(section));
4187 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4188 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4189 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4190 BNX2X_BTR);
4191
4192 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4194 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4195
4196 bp->stats_pending = 0;
4197
34f80b04 4198 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4199}
4200
4201static void bnx2x_update_coalesce(struct bnx2x *bp)
4202{
34f80b04 4203 int port = BP_PORT(bp);
4204 int i;
4205
4206 for_each_queue(bp, i) {
34f80b04 4207 int sb_id = bp->fp[i].sb_id;
4208
4209 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4210 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4211 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4212 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4213 bp->rx_ticks/12);
a2fbb9ea 4214 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4215 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4216 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 4217 bp->rx_ticks ? 0 : 1);
4218
4219 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4220 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4221 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 4222 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4223 bp->tx_ticks/12);
a2fbb9ea 4224 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4225 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 4226 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 4227 bp->tx_ticks ? 0 : 1);
4228 }
4229}
4230
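/*
 * Interrupt coalescing note: bp->rx_ticks/bp->tx_ticks are programmed into
 * the per-SB host-coalescing timeout registers for the RX and TX completion
 * indices above.  The division by 12 apparently matches the HC timer
 * granularity, and a tick value of 0 simply disables coalescing on that
 * index (the corresponding HC_DISABLE register is then written as 1).
 */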
4231static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4232 struct bnx2x_fastpath *fp, int last)
4233{
4234 int i;
4235
4236 for (i = 0; i < last; i++) {
4237 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4238 struct sk_buff *skb = rx_buf->skb;
4239
4240 if (skb == NULL) {
4241 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4242 continue;
4243 }
4244
4245 if (fp->tpa_state[i] == BNX2X_TPA_START)
4246 pci_unmap_single(bp->pdev,
4247 pci_unmap_addr(rx_buf, mapping),
4248 bp->rx_buf_use_size,
4249 PCI_DMA_FROMDEVICE);
4250
4251 dev_kfree_skb(skb);
4252 rx_buf->skb = NULL;
4253 }
4254}
4255
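/*
 * RX ring setup overview: when TPA (the hardware LRO-like aggregation
 * feature) is enabled, every queue first gets a pool of
 * ETH_MAX_AGGREGATION_QUEUES_E1H skbs and an SGE ring; an allocation failure
 * only disables TPA on that queue.  The BD, CQE and SGE rings are all page
 * chained - the final entries of each page hold the DMA address of the next
 * page rather than real descriptors.
 */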
4256static void bnx2x_init_rx_rings(struct bnx2x *bp)
4257{
4258 int func = BP_FUNC(bp);
4259 u16 ring_prod, cqe_ring_prod = 0;
a2fbb9ea 4260 int i, j;
4261
4262 bp->rx_buf_use_size = bp->dev->mtu;
4263 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4264 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4265
4266 if (bp->flags & TPA_ENABLE_FLAG) {
4267 DP(NETIF_MSG_IFUP,
4268 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4269 bp->rx_buf_use_size, bp->rx_buf_size,
4270 bp->dev->mtu + ETH_OVREHEAD);
4271
4272 for_each_queue(bp, j) {
4273 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275
4276 fp->tpa_pool[i].skb =
4277 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4278 if (!fp->tpa_pool[i].skb) {
4279 BNX2X_ERR("Failed to allocate TPA "
4280 "skb pool for queue[%d] - "
4281 "disabling TPA on this "
4282 "queue!\n", j);
4283 bnx2x_free_tpa_pool(bp, fp, i);
4284 fp->disable_tpa = 1;
4285 break;
4286 }
4287 pci_unmap_addr_set((struct sw_rx_bd *)
4288 &bp->fp->tpa_pool[i],
4289 mapping, 0);
4290 fp->tpa_state[i] = BNX2X_TPA_STOP;
4291 }
4292 }
4293 }
4294
4295 for_each_queue(bp, j) {
4296 struct bnx2x_fastpath *fp = &bp->fp[j];
4297
4298 fp->rx_bd_cons = 0;
4299 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4300 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4301
4302 /* "next page" elements initialization */
4303 /* SGE ring */
4304 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4305 struct eth_rx_sge *sge;
4306
4307 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4308 sge->addr_hi =
4309 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4310 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4311 sge->addr_lo =
4312 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4313 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4314 }
4315
4316 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4317
7a9b2557 4318 /* RX BD ring */
4319 for (i = 1; i <= NUM_RX_RINGS; i++) {
4320 struct eth_rx_bd *rx_bd;
4321
4322 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4323 rx_bd->addr_hi =
4324 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4325 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4326 rx_bd->addr_lo =
4327 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4328 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4329 }
4330
34f80b04 4331 /* CQ ring */
4332 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4333 struct eth_rx_cqe_next_page *nextpg;
4334
4335 nextpg = (struct eth_rx_cqe_next_page *)
4336 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4337 nextpg->addr_hi =
4338 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4339 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4340 nextpg->addr_lo =
4341 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4342 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4343 }
4344
4345 /* Allocate SGEs and initialize the ring elements */
4346 for (i = 0, ring_prod = 0;
4347 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4348
4349 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4350 BNX2X_ERR("was only able to allocate "
4351 "%d rx sges\n", i);
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4353 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4355 bnx2x_free_tpa_pool(bp, fp,
4356 ETH_MAX_AGGREGATION_QUEUES_E1H);
4357 fp->disable_tpa = 1;
4358 ring_prod = 0;
4359 break;
4360 }
4361 ring_prod = NEXT_SGE_IDX(ring_prod);
4362 }
4363 fp->rx_sge_prod = ring_prod;
4364
4365 /* Allocate BDs and initialize BD ring */
4366 fp->rx_comp_cons = fp->rx_alloc_failed = 0;
4367 cqe_ring_prod = ring_prod = 0;
4368 for (i = 0; i < bp->rx_ring_size; i++) {
4369 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4370 BNX2X_ERR("was only able to allocate "
4371 "%d rx skbs\n", i);
7a9b2557 4372 fp->rx_alloc_failed++;
4373 break;
4374 }
4375 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4376 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4377 BUG_TRAP(ring_prod > i);
4378 }
4379
4380 fp->rx_bd_prod = ring_prod;
4381 /* must not have more available CQEs than BDs */
4382 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4383 cqe_ring_prod);
4384 fp->rx_pkt = fp->rx_calls = 0;
4385
4386 /* Warning!
4387 * this will generate an interrupt (to the TSTORM),
4388 * so it must only be done after the chip is initialized
4389 */
4390 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4391 fp->rx_sge_prod);
4392 if (j != 0)
4393 continue;
4394
4395 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4396 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4397 U64_LO(fp->rx_comp_mapping));
4398 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4399 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4400 U64_HI(fp->rx_comp_mapping));
4401 }
4402}
4403
4404static void bnx2x_init_tx_ring(struct bnx2x *bp)
4405{
4406 int i, j;
4407
4408 for_each_queue(bp, j) {
4409 struct bnx2x_fastpath *fp = &bp->fp[j];
4410
4411 for (i = 1; i <= NUM_TX_RINGS; i++) {
4412 struct eth_tx_bd *tx_bd =
4413 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4414
4415 tx_bd->addr_hi =
4416 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4417 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4418 tx_bd->addr_lo =
4419 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4420 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4421 }
4422
4423 fp->tx_pkt_prod = 0;
4424 fp->tx_pkt_cons = 0;
4425 fp->tx_bd_prod = 0;
4426 fp->tx_bd_cons = 0;
4427 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4428 fp->tx_pkt = 0;
4429 }
4430}
4431
4432static void bnx2x_init_sp_ring(struct bnx2x *bp)
4433{
34f80b04 4434 int func = BP_FUNC(bp);
4435
4436 spin_lock_init(&bp->spq_lock);
4437
4438 bp->spq_left = MAX_SPQ_PENDING;
4439 bp->spq_prod_idx = 0;
4440 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4441 bp->spq_prod_bd = bp->spq;
4442 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4443
34f80b04 4444 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4445 U64_LO(bp->spq_mapping));
4446 REG_WR(bp,
4447 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4448 U64_HI(bp->spq_mapping));
4449
34f80b04 4450 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4451 bp->spq_prod_idx);
4452}
4453
4454static void bnx2x_init_context(struct bnx2x *bp)
4455{
4456 int i;
4457
4458 for_each_queue(bp, i) {
4459 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4460 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4461 u8 sb_id = FP_SB_ID(fp);
4462
4463 context->xstorm_st_context.tx_bd_page_base_hi =
4464 U64_HI(fp->tx_desc_mapping);
4465 context->xstorm_st_context.tx_bd_page_base_lo =
4466 U64_LO(fp->tx_desc_mapping);
4467 context->xstorm_st_context.db_data_addr_hi =
4468 U64_HI(fp->tx_prods_mapping);
4469 context->xstorm_st_context.db_data_addr_lo =
4470 U64_LO(fp->tx_prods_mapping);
4471 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4472 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4473
4474 context->ustorm_st_context.common.sb_index_numbers =
4475 BNX2X_RX_SB_INDEX_NUM;
4476 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4477 context->ustorm_st_context.common.status_block_id = sb_id;
4478 context->ustorm_st_context.common.flags =
4479 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4480 context->ustorm_st_context.common.mc_alignment_size = 64;
4481 context->ustorm_st_context.common.bd_buff_size =
4482 bp->rx_buf_use_size;
4483 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4484 U64_HI(fp->rx_desc_mapping);
34f80b04 4485 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4486 U64_LO(fp->rx_desc_mapping);
4487 if (!fp->disable_tpa) {
4488 context->ustorm_st_context.common.flags |=
4489 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4490 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4491 context->ustorm_st_context.common.sge_buff_size =
4492 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4493 context->ustorm_st_context.common.sge_page_base_hi =
4494 U64_HI(fp->rx_sge_mapping);
4495 context->ustorm_st_context.common.sge_page_base_lo =
4496 U64_LO(fp->rx_sge_mapping);
4497 }
4498
4499 context->cstorm_st_context.sb_index_number =
4500 HC_INDEX_C_ETH_TX_CQ_CONS;
34f80b04 4501 context->cstorm_st_context.status_block_id = sb_id;
4502
4503 context->xstorm_ag_context.cdu_reserved =
4504 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4505 CDU_REGION_NUMBER_XCM_AG,
4506 ETH_CONNECTION_TYPE);
4507 context->ustorm_ag_context.cdu_usage =
4508 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4509 CDU_REGION_NUMBER_UCM_AG,
4510 ETH_CONNECTION_TYPE);
4511 }
4512}
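/*
 * The per-queue ETH context filled above tells each storm where its rings
 * live: XSTORM gets the TX BD ring base and the doorbell data address,
 * USTORM gets the RX BD (and, with TPA, SGE) ring bases plus the buffer
 * sizes, and CSTORM gets the status block/index used for TX completions.
 * The cdu_reserved/cdu_usage fields appear to be CDU validation values
 * derived from the connection id.
 */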
4513
4514static void bnx2x_init_ind_table(struct bnx2x *bp)
4515{
34f80b04 4516 int port = BP_PORT(bp);
4517 int i;
4518
4519 if (!is_multi(bp))
4520 return;
4521
34f80b04 4522 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4523 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4524 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4525 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4526 i % bp->num_queues);
4527
4528 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4529}
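/*
 * RSS indirection: in multi-queue mode the TSTORM indirection table is
 * filled round-robin with the queue numbers (entry i gets i % num_queues),
 * so the RSS hash result selects the destination RX queue.
 */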
4530
4531static void bnx2x_set_client_config(struct bnx2x *bp)
4532{
49d66772 4533 struct tstorm_eth_client_config tstorm_client = {0};
4534 int port = BP_PORT(bp);
4535 int i;
49d66772 4536
34f80b04 4537 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4538 tstorm_client.statistics_counter_id = 0;
4539 tstorm_client.config_flags =
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4541#ifdef BCM_VLAN
34f80b04 4542 if (bp->rx_mode && bp->vlgrp) {
4543 tstorm_client.config_flags |=
4544 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4545 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4546 }
4547#endif
49d66772 4548
4549 if (bp->flags & TPA_ENABLE_FLAG) {
4550 tstorm_client.max_sges_for_packet =
4551 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4552 tstorm_client.max_sges_for_packet =
4553 ((tstorm_client.max_sges_for_packet +
4554 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4555 PAGES_PER_SGE_SHIFT;
4556
4557 tstorm_client.config_flags |=
4558 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4559 }
4560
4561 for_each_queue(bp, i) {
4562 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4563 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4564 ((u32 *)&tstorm_client)[0]);
4565 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4566 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4567 ((u32 *)&tstorm_client)[1]);
4568 }
4569
4570 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4571 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4572}
4573
4574static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4575{
a2fbb9ea 4576 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4577 int mode = bp->rx_mode;
4578 int mask = (1 << BP_L_ID(bp));
4579 int func = BP_FUNC(bp);
4580 int i;
4581
4582 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4583
4584 switch (mode) {
4585 case BNX2X_RX_MODE_NONE: /* no Rx */
4586 tstorm_mac_filter.ucast_drop_all = mask;
4587 tstorm_mac_filter.mcast_drop_all = mask;
4588 tstorm_mac_filter.bcast_drop_all = mask;
4589 break;
4590 case BNX2X_RX_MODE_NORMAL:
34f80b04 4591 tstorm_mac_filter.bcast_accept_all = mask;
4592 break;
4593 case BNX2X_RX_MODE_ALLMULTI:
4594 tstorm_mac_filter.mcast_accept_all = mask;
4595 tstorm_mac_filter.bcast_accept_all = mask;
4596 break;
4597 case BNX2X_RX_MODE_PROMISC:
4598 tstorm_mac_filter.ucast_accept_all = mask;
4599 tstorm_mac_filter.mcast_accept_all = mask;
4600 tstorm_mac_filter.bcast_accept_all = mask;
4601 break;
4602 default:
4603 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4604 break;
4605 }
4606
4607 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4608 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4609 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4610 ((u32 *)&tstorm_mac_filter)[i]);
4611
34f80b04 4612/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4613 ((u32 *)&tstorm_mac_filter)[i]); */
4614 }
a2fbb9ea 4615
4616 if (mode != BNX2X_RX_MODE_NONE)
4617 bnx2x_set_client_config(bp);
4618}
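/*
 * RX filtering summary: the mask is the bit of this function's leading
 * client, and the mode maps onto per-client drop/accept-all bits in the
 * TSTORM MAC filter config - NONE drops everything, NORMAL only forces
 * broadcast accept (unicast/multicast are presumably matched by the regular
 * MAC/MC filters), ALLMULTI adds accept-all-multicast, and PROMISC accepts
 * everything.  The client config is pushed afterwards for every mode except
 * NONE.
 */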
4619
4620static void bnx2x_init_internal(struct bnx2x *bp)
4621{
4622 struct tstorm_eth_function_common_config tstorm_config = {0};
4623 struct stats_indication_flags stats_flags = {0};
4624 int port = BP_PORT(bp);
4625 int func = BP_FUNC(bp);
4626 int i;
4627
4628 if (is_multi(bp)) {
4629 tstorm_config.config_flags = MULTI_FLAGS;
4630 tstorm_config.rss_result_mask = MULTI_MASK;
4631 }
4632
4633 tstorm_config.leading_client_id = BP_L_ID(bp);
4634
a2fbb9ea 4635 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4637 (*(u32 *)&tstorm_config));
4638
34f80b04 4639/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4641
c14423fe 4642 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp);
4644
34f80b04 4645 stats_flags.collect_eth = 1;
4646
4647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4648 ((u32 *)&stats_flags)[0]);
4649 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4650 ((u32 *)&stats_flags)[1]);
4651
4652 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4653 ((u32 *)&stats_flags)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4655 ((u32 *)&stats_flags)[1]);
4656
4657 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4658 ((u32 *)&stats_flags)[0]);
4659 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4660 ((u32 *)&stats_flags)[1]);
4661
34f80b04 4662/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
a2fbb9ea 4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4664
4665 if (CHIP_IS_E1H(bp)) {
4666 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4667 IS_E1HMF(bp));
4668 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4669 IS_E1HMF(bp));
4670 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4671 IS_E1HMF(bp));
4672 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4673 IS_E1HMF(bp));
4674
4675 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4676 bp->e1hov);
4677 }
4678
4679 /* Zero this manually as its initialization is
4680 currently missing in the initTool */
4681 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
4682 REG_WR(bp, BAR_USTRORM_INTMEM +
4683 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4684
4685 for_each_queue(bp, i) {
4686 struct bnx2x_fastpath *fp = &bp->fp[i];
4687 u16 max_agg_size;
4688
4689 REG_WR(bp, BAR_USTRORM_INTMEM +
4690 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4691 U64_LO(fp->rx_comp_mapping));
4692 REG_WR(bp, BAR_USTRORM_INTMEM +
4693 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4694 U64_HI(fp->rx_comp_mapping));
4695
4696 max_agg_size = min((u32)(bp->rx_buf_use_size +
4697 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4698 (u32)0xffff);
4699 REG_WR16(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4701 max_agg_size);
4702 }
4703}
4704
4705static void bnx2x_nic_init(struct bnx2x *bp)
4706{
4707 int i;
4708
4709 for_each_queue(bp, i) {
4710 struct bnx2x_fastpath *fp = &bp->fp[i];
4711
34f80b04 4712 fp->bp = bp;
a2fbb9ea 4713 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4714 fp->index = i;
4715 fp->cl_id = BP_L_ID(bp) + i;
4716 fp->sb_id = fp->cl_id;
4717 DP(NETIF_MSG_IFUP,
4718 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4719 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4720 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4721 fp->status_blk_mapping);
4722 }
4723
4724 bnx2x_init_def_sb(bp, bp->def_status_blk,
34f80b04 4725 bp->def_status_blk_mapping, DEF_SB_ID);
4726 bnx2x_update_coalesce(bp);
4727 bnx2x_init_rx_rings(bp);
4728 bnx2x_init_tx_ring(bp);
4729 bnx2x_init_sp_ring(bp);
4730 bnx2x_init_context(bp);
4731 bnx2x_init_internal(bp);
bb2a0f7a 4732 bnx2x_storm_stats_init(bp);
a2fbb9ea 4733 bnx2x_init_ind_table(bp);
615f8fd9 4734 bnx2x_int_enable(bp);
4735}
4736
4737/* end of nic init */
4738
4739/*
4740 * gzip service functions
4741 */
4742
4743static int bnx2x_gunzip_init(struct bnx2x *bp)
4744{
4745 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4746 &bp->gunzip_mapping);
4747 if (bp->gunzip_buf == NULL)
4748 goto gunzip_nomem1;
4749
4750 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4751 if (bp->strm == NULL)
4752 goto gunzip_nomem2;
4753
4754 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4755 GFP_KERNEL);
4756 if (bp->strm->workspace == NULL)
4757 goto gunzip_nomem3;
4758
4759 return 0;
4760
4761gunzip_nomem3:
4762 kfree(bp->strm);
4763 bp->strm = NULL;
4764
4765gunzip_nomem2:
4766 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4767 bp->gunzip_mapping);
4768 bp->gunzip_buf = NULL;
4769
4770gunzip_nomem1:
4771 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4772 " decompression\n", bp->dev->name);
4773 return -ENOMEM;
4774}
4775
4776static void bnx2x_gunzip_end(struct bnx2x *bp)
4777{
4778 kfree(bp->strm->workspace);
4779
4780 kfree(bp->strm);
4781 bp->strm = NULL;
4782
4783 if (bp->gunzip_buf) {
4784 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4785 bp->gunzip_mapping);
4786 bp->gunzip_buf = NULL;
4787 }
4788}
4789
4790static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4791{
4792 int n, rc;
4793
4794 /* check gzip header */
4795 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4796 return -EINVAL;
4797
4798 n = 10;
4799
34f80b04 4800#define FNAME 0x8
4801
4802 if (zbuf[3] & FNAME)
4803 while ((zbuf[n++] != 0) && (n < len));
4804
4805 bp->strm->next_in = zbuf + n;
4806 bp->strm->avail_in = len - n;
4807 bp->strm->next_out = bp->gunzip_buf;
4808 bp->strm->avail_out = FW_BUF_SIZE;
4809
4810 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4811 if (rc != Z_OK)
4812 return rc;
4813
4814 rc = zlib_inflate(bp->strm, Z_FINISH);
4815 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4816 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4817 bp->dev->name, bp->strm->msg);
4818
4819 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4820 if (bp->gunzip_outlen & 0x3)
4821 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4822 " gunzip_outlen (%d) not aligned\n",
4823 bp->dev->name, bp->gunzip_outlen);
4824 bp->gunzip_outlen >>= 2;
4825
4826 zlib_inflateEnd(bp->strm);
4827
4828 if (rc == Z_STREAM_END)
4829 return 0;
4830
4831 return rc;
4832}
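/*
 * bnx2x_gunzip() sanity-checks the gzip magic (0x1f 0x8b) and the deflate
 * method byte, skips the fixed 10-byte gzip header plus the optional
 * NUL-terminated original-file-name field (FNAME flag), and then inflates
 * the raw deflate stream (negative windowBits) into gunzip_buf.  The output
 * length ends up in 32-bit words (the >>= 2), hence the alignment warning
 * above.
 */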
4833
4834/* nic load/unload */
4835
4836/*
34f80b04 4837 * General service functions
4838 */
4839
4840/* send a NIG loopback debug packet */
4841static void bnx2x_lb_pckt(struct bnx2x *bp)
4842{
a2fbb9ea 4843 u32 wb_write[3];
4844
4845 /* Ethernet source and destination addresses */
4846 wb_write[0] = 0x55555555;
4847 wb_write[1] = 0x55555555;
34f80b04 4848 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4849 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4850
4851 /* NON-IP protocol */
4852 wb_write[0] = 0x09000000;
4853 wb_write[1] = 0x55555555;
34f80b04 4854 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4855 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4856}
4857
4858/* some of the internal memories
4859 * are not directly readable from the driver,
4860 * so to test them we send debug packets
4861 */
4862static int bnx2x_int_mem_test(struct bnx2x *bp)
4863{
4864 int factor;
4865 int count, i;
4866 u32 val = 0;
4867
ad8d3948 4868 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4869 factor = 120;
4870 else if (CHIP_REV_IS_EMUL(bp))
4871 factor = 200;
4872 else
a2fbb9ea 4873 factor = 1;
4874
4875 DP(NETIF_MSG_HW, "start part1\n");
4876
4877 /* Disable inputs of parser neighbor blocks */
4878 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4879 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4880 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4881 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4882
4883 /* Write 0 to parser credits for CFC search request */
4884 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4885
4886 /* send Ethernet packet */
4887 bnx2x_lb_pckt(bp);
4888
4889 /* TODO: do we need to reset the NIG statistics here? */
4890 /* Wait until NIG register shows 1 packet of size 0x10 */
4891 count = 1000 * factor;
4892 while (count) {
34f80b04 4893
4894 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4895 val = *bnx2x_sp(bp, wb_data[0]);
4896 if (val == 0x10)
4897 break;
4898
4899 msleep(10);
4900 count--;
4901 }
4902 if (val != 0x10) {
4903 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4904 return -1;
4905 }
4906
4907 /* Wait until PRS register shows 1 packet */
4908 count = 1000 * factor;
4909 while (count) {
4910 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4911 if (val == 1)
4912 break;
4913
4914 msleep(10);
4915 count--;
4916 }
4917 if (val != 0x1) {
4918 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4919 return -2;
4920 }
4921
4922 /* Reset and init BRB, PRS */
34f80b04 4923 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4924 msleep(50);
34f80b04 4925 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4926 msleep(50);
4927 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4928 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4929
4930 DP(NETIF_MSG_HW, "part2\n");
4931
4932 /* Disable inputs of parser neighbor blocks */
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4937
4938 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4940
4941 /* send 10 Ethernet packets */
4942 for (i = 0; i < 10; i++)
4943 bnx2x_lb_pckt(bp);
4944
4945 /* Wait until NIG register shows 10 + 1
4946 packets of size 11*0x10 = 0xb0 */
4947 count = 1000 * factor;
4948 while (count) {
34f80b04 4949
4950 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4951 val = *bnx2x_sp(bp, wb_data[0]);
4952 if (val == 0xb0)
4953 break;
4954
4955 msleep(10);
4956 count--;
4957 }
4958 if (val != 0xb0) {
4959 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4960 return -3;
4961 }
4962
4963 /* Wait until PRS register shows 2 packets */
4964 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4965 if (val != 2)
4966 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4967
4968 /* Write 1 to parser credits for CFC search request */
4969 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4970
4971 /* Wait until PRS register shows 3 packets */
4972 msleep(10 * factor);
4973 /* the PRS packet counter should now show 3 packets */
4974 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4975 if (val != 3)
4976 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4977
4978 /* clear NIG EOP FIFO */
4979 for (i = 0; i < 11; i++)
4980 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4981 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4982 if (val != 1) {
4983 BNX2X_ERR("clear of NIG failed\n");
4984 return -4;
4985 }
4986
4987 /* Reset and init BRB, PRS, NIG */
4988 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4989 msleep(50);
4990 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4991 msleep(50);
4992 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4993 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4994#ifndef BCM_ISCSI
4995 /* set NIC mode */
4996 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4997#endif
4998
4999 /* Enable inputs of parser neighbor blocks */
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5004
5005 DP(NETIF_MSG_HW, "done\n");
5006
5007 return 0; /* OK */
5008}
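/*
 * Summary of the self-test above: the parser's neighbour blocks are disabled
 * and the CFC search credits are set to 0 so injected frames stall inside
 * BRB/PRS where they can be counted; debug loopback frames (three 32-bit
 * words written to NIG_REG_DEBUG_PACKET_LB, the last one carrying the
 * SOP/EOP flags) are then sent and the NIG/PRS counters are polled, with the
 * timeout scaled by "factor" for FPGA/emulation targets.  Afterwards BRB,
 * PRS and NIG are reset and the parser inputs are re-enabled.
 */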
5009
5010static void enable_blocks_attention(struct bnx2x *bp)
5011{
5012 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5013 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5014 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5015 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5016 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5017 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5018 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5019 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5020 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5021/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5022/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5023 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5024 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5025 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5026/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5027/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5028 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5029 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5030 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5031 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5032/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5033/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5034 if (CHIP_REV_IS_FPGA(bp))
5035 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5036 else
5037 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5038 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5039 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5040 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5041/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5042/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5043 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5044 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5045/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5046 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5047}
5048
5049
5050static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5051{
a2fbb9ea 5052 u32 val, i;
a2fbb9ea 5053
34f80b04 5054 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5055
5056 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5057 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5058
5059 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5060 if (CHIP_IS_E1H(bp))
5061 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5062
5063 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5064 msleep(30);
5065 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5066
5067 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5068 if (CHIP_IS_E1(bp)) {
5069 /* enable HW interrupt from PXP on USDM overflow
5070 bit 16 on INT_MASK_0 */
5071 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5072 }
a2fbb9ea 5073
5074 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5075 bnx2x_init_pxp(bp);
5076
5077#ifdef __BIG_ENDIAN
5078 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5079 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5080 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5081 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5082 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5083 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5084
5085/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5086 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5087 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5088 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5089 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5090#endif
5091
5092#ifndef BCM_ISCSI
5093 /* set NIC mode */
5094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5095#endif
5096
34f80b04 5097 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5098#ifdef BCM_ISCSI
5099 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5100 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5101 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5102#endif
5103
34f80b04
EG
5104 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5105 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5106
34f80b04
EG
5107	/* let the HW do its magic ... */
5108 msleep(100);
5109 /* finish PXP init */
5110 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5111 if (val != 1) {
5112 BNX2X_ERR("PXP2 CFG failed\n");
5113 return -EBUSY;
5114 }
5115 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5116 if (val != 1) {
5117 BNX2X_ERR("PXP2 RD_INIT failed\n");
5118 return -EBUSY;
5119 }
a2fbb9ea 5120
34f80b04
EG
5121 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5122 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5123
34f80b04 5124 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5125
34f80b04
EG
5126 /* clean the DMAE memory */
5127 bp->dmae_ready = 1;
5128 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5129
34f80b04
EG
5130 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5131 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5132 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5133 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5134
34f80b04
EG
5135 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5136 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5137 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5138 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5139
5140 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5141 /* soft reset pulse */
5142 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5143 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5144
5145#ifdef BCM_ISCSI
34f80b04 5146 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5147#endif
a2fbb9ea 5148
34f80b04
EG
5149 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5150 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5151 if (!CHIP_REV_IS_SLOW(bp)) {
5152 /* enable hw interrupt from doorbell Q */
5153 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5154 }
a2fbb9ea 5155
34f80b04
EG
5156 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5157 if (CHIP_REV_IS_SLOW(bp)) {
5158 /* fix for emulation and FPGA for no pause */
5159 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5160 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5161 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5162 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5163 }
a2fbb9ea 5164
34f80b04
EG
5165 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5166 if (CHIP_IS_E1H(bp))
5167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5168
34f80b04
EG
5169 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5170 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5171 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5172 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5173
34f80b04
EG
5174 if (CHIP_IS_E1H(bp)) {
5175 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5176 STORM_INTMEM_SIZE_E1H/2);
5177 bnx2x_init_fill(bp,
5178 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5179 0, STORM_INTMEM_SIZE_E1H/2);
5180 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5181 STORM_INTMEM_SIZE_E1H/2);
5182 bnx2x_init_fill(bp,
5183 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5184 0, STORM_INTMEM_SIZE_E1H/2);
5185 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5186 STORM_INTMEM_SIZE_E1H/2);
5187 bnx2x_init_fill(bp,
5188 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5189 0, STORM_INTMEM_SIZE_E1H/2);
5190 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5191 STORM_INTMEM_SIZE_E1H/2);
5192 bnx2x_init_fill(bp,
5193 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5194 0, STORM_INTMEM_SIZE_E1H/2);
5195 } else { /* E1 */
ad8d3948
EG
5196 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5197 STORM_INTMEM_SIZE_E1);
5198 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5199 STORM_INTMEM_SIZE_E1);
5200 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5201 STORM_INTMEM_SIZE_E1);
5202 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5203 STORM_INTMEM_SIZE_E1);
34f80b04 5204 }
a2fbb9ea 5205
34f80b04
EG
5206 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5207 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5208 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5209 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5210
34f80b04
EG
5211 /* sync semi rtc */
5212 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5213 0x80000000);
5214 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5215 0x80000000);
a2fbb9ea 5216
34f80b04
EG
5217 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5218 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5219 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5220
34f80b04
EG
5221 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5222 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5223 REG_WR(bp, i, 0xc0cac01a);
5224 /* TODO: replace with something meaningful */
5225 }
5226 if (CHIP_IS_E1H(bp))
5227 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5228 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5229
34f80b04
EG
5230 if (sizeof(union cdu_context) != 1024)
5231 /* we currently assume that a context is 1024 bytes */
5232 printk(KERN_ALERT PFX "please adjust the size of"
5233 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5234
34f80b04
EG
5235 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5236 val = (4 << 24) + (0 << 12) + 1024;
5237 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5238 if (CHIP_IS_E1(bp)) {
5239		/* !!! fix pxp client credit until excel update */
5240 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5241 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5242 }
a2fbb9ea 5243
34f80b04
EG
5244 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5245 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5246
34f80b04
EG
5247 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5248 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5249
34f80b04
EG
5250 /* PXPCS COMMON comes here */
5251 /* Reset PCIE errors for debug */
5252 REG_WR(bp, 0x2814, 0xffffffff);
5253 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5254
34f80b04
EG
5255 /* EMAC0 COMMON comes here */
5256 /* EMAC1 COMMON comes here */
5257 /* DBU COMMON comes here */
5258 /* DBG COMMON comes here */
5259
5260 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5261 if (CHIP_IS_E1H(bp)) {
5262 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5263 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5264 }
5265
5266 if (CHIP_REV_IS_SLOW(bp))
5267 msleep(200);
5268
5269 /* finish CFC init */
5270 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5271 if (val != 1) {
5272 BNX2X_ERR("CFC LL_INIT failed\n");
5273 return -EBUSY;
5274 }
5275 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5276 if (val != 1) {
5277 BNX2X_ERR("CFC AC_INIT failed\n");
5278 return -EBUSY;
5279 }
5280 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5281 if (val != 1) {
5282 BNX2X_ERR("CFC CAM_INIT failed\n");
5283 return -EBUSY;
5284 }
5285 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5286
34f80b04
EG
5287 /* read NIG statistic
5288 to see if this is our first up since powerup */
5289 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5290 val = *bnx2x_sp(bp, wb_data[0]);
5291
5292 /* do internal memory self test */
5293 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5294 BNX2X_ERR("internal mem self test failed\n");
5295 return -EBUSY;
5296 }
5297
5298 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5299 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5300 /* Fan failure is indicated by SPIO 5 */
5301 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5302 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5303
5304 /* set to active low mode */
5305 val = REG_RD(bp, MISC_REG_SPIO_INT);
5306 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5307 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5308 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5309
34f80b04
EG
5310 /* enable interrupt to signal the IGU */
5311 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5312 val |= (1 << MISC_REGISTERS_SPIO_5);
5313 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5314 break;
f1410647 5315
34f80b04
EG
5316 default:
5317 break;
5318 }
f1410647 5319
34f80b04
EG
5320 /* clear PXP2 attentions */
5321 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5322
34f80b04 5323 enable_blocks_attention(bp);
a2fbb9ea 5324
7a9b2557
VZ
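	/* if TPA is enabled, mark tpa_exist in TSTORM internal memory so the
	   firmware knows aggregation is in use */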
5325 if (bp->flags & TPA_ENABLE_FLAG) {
5326 struct tstorm_eth_tpa_exist tmp = {0};
5327
5328 tmp.tpa_exist = 1;
5329
5330 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5331 ((u32 *)&tmp)[0]);
5332 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5333 ((u32 *)&tmp)[1]);
5334 }
5335
34f80b04
EG
5336 return 0;
5337}
a2fbb9ea 5338
34f80b04
EG
5339static int bnx2x_init_port(struct bnx2x *bp)
5340{
5341 int port = BP_PORT(bp);
5342 u32 val;
a2fbb9ea 5343
34f80b04
EG
5344 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5345
5346 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5347
5348 /* Port PXP comes here */
5349 /* Port PXP2 comes here */
a2fbb9ea
ET
5350#ifdef BCM_ISCSI
5351 /* Port0 1
5352 * Port1 385 */
5353 i++;
5354 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5355 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5356 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5357 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5358
5359 /* Port0 2
5360 * Port1 386 */
5361 i++;
5362 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5363 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5364 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5365 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5366
5367 /* Port0 3
5368 * Port1 387 */
5369 i++;
5370 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5371 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5372 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5373 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5374#endif
34f80b04 5375 /* Port CMs come here */
a2fbb9ea
ET
5376
5377 /* Port QM comes here */
a2fbb9ea
ET
5378#ifdef BCM_ISCSI
5379 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5380 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5381
5382 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5383 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5384#endif
5385 /* Port DQ comes here */
5386 /* Port BRB1 comes here */
ad8d3948 5387 /* Port PRS comes here */
a2fbb9ea
ET
5388 /* Port TSDM comes here */
5389 /* Port CSDM comes here */
5390 /* Port USDM comes here */
5391 /* Port XSDM comes here */
34f80b04
EG
5392 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5393 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5394 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5395 port ? USEM_PORT1_END : USEM_PORT0_END);
5396 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5397 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5398 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5399 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5400 /* Port UPB comes here */
34f80b04
EG
5401 /* Port XPB comes here */
5402
5403 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5404 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5405
5406	/* configure PBF to work without PAUSE, mtu 9000 */
34f80b04 5407 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5408
5409 /* update threshold */
34f80b04 5410 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5411 /* update init credit */
34f80b04 5412 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5413
5414 /* probe changes */
34f80b04 5415 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5416 msleep(5);
34f80b04 5417 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5418
5419#ifdef BCM_ISCSI
5420 /* tell the searcher where the T2 table is */
5421 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5422
5423 wb_write[0] = U64_LO(bp->t2_mapping);
5424 wb_write[1] = U64_HI(bp->t2_mapping);
5425 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5426 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5427 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5428 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5429
5430 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5431 /* Port SRCH comes here */
5432#endif
5433 /* Port CDU comes here */
5434 /* Port CFC comes here */
34f80b04
EG
5435
5436 if (CHIP_IS_E1(bp)) {
5437 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5438 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5439 }
5440 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5441 port ? HC_PORT1_END : HC_PORT0_END);
5442
5443 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5444 MISC_AEU_PORT0_START,
34f80b04
EG
5445 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5446 /* init aeu_mask_attn_func_0/1:
5447 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5448 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5449 * bits 4-7 are used for "per vn group attention" */
5450 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5451 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5452
a2fbb9ea
ET
5453 /* Port PXPCS comes here */
5454 /* Port EMAC0 comes here */
5455 /* Port EMAC1 comes here */
5456 /* Port DBU comes here */
5457 /* Port DBG comes here */
34f80b04
EG
5458 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5459 port ? NIG_PORT1_END : NIG_PORT0_END);
5460
5461 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5462
5463 if (CHIP_IS_E1H(bp)) {
5464 u32 wsum;
5465 struct cmng_struct_per_port m_cmng_port;
5466 int vn;
5467
5468 /* 0x2 disable e1hov, 0x1 enable */
5469 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5470 (IS_E1HMF(bp) ? 0x1 : 0x2));
5471
5472 /* Init RATE SHAPING and FAIRNESS contexts.
5473 Initialize as if there is 10G link. */
5474 wsum = bnx2x_calc_vn_wsum(bp);
5475 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5476 if (IS_E1HMF(bp))
5477 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5478 bnx2x_init_vn_minmax(bp, 2*vn + port,
5479 wsum, 10000, &m_cmng_port);
5480 }
5481
a2fbb9ea
ET
5482 /* Port MCP comes here */
5483 /* Port DMAE comes here */
5484
34f80b04 5485 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
f1410647
ET
5486 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5487 /* add SPIO 5 to group 0 */
5488 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5489 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5490 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5491 break;
5492
5493 default:
5494 break;
5495 }
5496
c18487ee 5497 bnx2x__link_reset(bp);
a2fbb9ea 5498
34f80b04
EG
5499 return 0;
5500}
5501
5502#define ILT_PER_FUNC (768/2)
5503#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5504/* the phys address is shifted right 12 bits and has a
5505   1 (valid bit) added at the 53rd bit;
5506   then, since this is a wide register(TM),
5507   we split it into two 32 bit writes
5508 */
5509#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5510#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5511#define PXP_ONE_ILT(x) (((x) << 10) | x)
5512#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
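/* the PXP ILT macros pack a line range as (last << 10) | first;
   PXP_ONE_ILT maps a single line to itself */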
5513
5514#define CNIC_ILT_LINES 0
5515
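/* write a single ILT (on-chip address table) entry; E1 and E1H expose the
   table at different register offsets */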
5516static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5517{
5518 int reg;
5519
5520 if (CHIP_IS_E1H(bp))
5521 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5522 else /* E1 */
5523 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5524
5525 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5526}
5527
5528static int bnx2x_init_func(struct bnx2x *bp)
5529{
5530 int port = BP_PORT(bp);
5531 int func = BP_FUNC(bp);
5532 int i;
5533
5534 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5535
5536 i = FUNC_ILT_BASE(func);
5537
5538 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5539 if (CHIP_IS_E1H(bp)) {
5540 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5541 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5542 } else /* E1 */
5543 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5544 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5545
5546
5547 if (CHIP_IS_E1H(bp)) {
5548 for (i = 0; i < 9; i++)
5549 bnx2x_init_block(bp,
5550 cm_start[func][i], cm_end[func][i]);
5551
5552 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5553 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5554 }
5555
5556 /* HC init per function */
5557 if (CHIP_IS_E1H(bp)) {
5558 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5559
5560 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5561 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5562 }
5563 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5564
5565 if (CHIP_IS_E1H(bp))
5566 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5567
c14423fe 5568 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5569 REG_WR(bp, 0x2114, 0xffffffff);
5570 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5571
34f80b04
EG
5572 return 0;
5573}
5574
5575static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5576{
5577 int i, rc = 0;
a2fbb9ea 5578
34f80b04
EG
5579 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5580 BP_FUNC(bp), load_code);
a2fbb9ea 5581
34f80b04
EG
5582 bp->dmae_ready = 0;
5583 mutex_init(&bp->dmae_mutex);
5584 bnx2x_gunzip_init(bp);
a2fbb9ea 5585
34f80b04
EG
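	/* the load_code cases below intentionally fall through: a COMMON load
	   also performs the PORT and FUNCTION init, and a PORT load also
	   performs the FUNCTION init */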
5586 switch (load_code) {
5587 case FW_MSG_CODE_DRV_LOAD_COMMON:
5588 rc = bnx2x_init_common(bp);
5589 if (rc)
5590 goto init_hw_err;
5591 /* no break */
5592
5593 case FW_MSG_CODE_DRV_LOAD_PORT:
5594 bp->dmae_ready = 1;
5595 rc = bnx2x_init_port(bp);
5596 if (rc)
5597 goto init_hw_err;
5598 /* no break */
5599
5600 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5601 bp->dmae_ready = 1;
5602 rc = bnx2x_init_func(bp);
5603 if (rc)
5604 goto init_hw_err;
5605 break;
5606
5607 default:
5608 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5609 break;
5610 }
5611
5612 if (!BP_NOMCP(bp)) {
5613 int func = BP_FUNC(bp);
a2fbb9ea
ET
5614
5615 bp->fw_drv_pulse_wr_seq =
34f80b04 5616 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5617 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5618 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5619 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5620 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5621 } else
5622 bp->func_stx = 0;
a2fbb9ea 5623
34f80b04
EG
5624 /* this needs to be done before gunzip end */
5625 bnx2x_zero_def_sb(bp);
5626 for_each_queue(bp, i)
5627 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5628
5629init_hw_err:
5630 bnx2x_gunzip_end(bp);
5631
5632 return rc;
a2fbb9ea
ET
5633}
5634
c14423fe 5635/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5636static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5637{
34f80b04 5638 int func = BP_FUNC(bp);
f1410647
ET
5639 u32 seq = ++bp->fw_seq;
5640 u32 rc = 0;
a2fbb9ea 5641
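	/* each request carries an incrementing sequence number; the FW echoes
	   it in its reply so we can match the response to our command */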
34f80b04 5642 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5643 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea
ET
5644
5645	/* let the FW do its magic ... */
5646 msleep(100); /* TBD */
5647
5648 if (CHIP_REV_IS_SLOW(bp))
5649 msleep(900);
5650
34f80b04 5651 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea
ET
5652 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5653
5654 /* is this a reply to our command? */
5655 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5656 rc &= FW_MSG_CODE_MASK;
f1410647 5657
a2fbb9ea
ET
5658 } else {
5659 /* FW BUG! */
5660 BNX2X_ERR("FW failed to respond!\n");
5661 bnx2x_fw_dump(bp);
5662 rc = 0;
5663 }
f1410647 5664
a2fbb9ea
ET
5665 return rc;
5666}
5667
5668static void bnx2x_free_mem(struct bnx2x *bp)
5669{
5670
5671#define BNX2X_PCI_FREE(x, y, size) \
5672 do { \
5673 if (x) { \
5674 pci_free_consistent(bp->pdev, size, x, y); \
5675 x = NULL; \
5676 y = 0; \
5677 } \
5678 } while (0)
5679
5680#define BNX2X_FREE(x) \
5681 do { \
5682 if (x) { \
5683 vfree(x); \
5684 x = NULL; \
5685 } \
5686 } while (0)
5687
5688 int i;
5689
5690 /* fastpath */
5691 for_each_queue(bp, i) {
5692
5693 /* Status blocks */
5694 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5695 bnx2x_fp(bp, i, status_blk_mapping),
5696 sizeof(struct host_status_block) +
5697 sizeof(struct eth_tx_db_data));
5698
5699 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5700 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5701 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5702 bnx2x_fp(bp, i, tx_desc_mapping),
5703 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5704
5705 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5706 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5707 bnx2x_fp(bp, i, rx_desc_mapping),
5708 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5709
5710 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5711 bnx2x_fp(bp, i, rx_comp_mapping),
5712 sizeof(struct eth_fast_path_rx_cqe) *
5713 NUM_RCQ_BD);
a2fbb9ea 5714
7a9b2557
VZ
5715 /* SGE ring */
5716 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5717 bnx2x_fp(bp, i, rx_sge_mapping),
5718 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5719 }
a2fbb9ea
ET
5720 /* end of fastpath */
5721
5722 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5723 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5724
5725 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5726 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5727
5728#ifdef BCM_ISCSI
5729 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5730 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5731 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5732 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5733#endif
7a9b2557 5734 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
5735
5736#undef BNX2X_PCI_FREE
5737#undef BNX2X_FREE
5738}
5739
5740static int bnx2x_alloc_mem(struct bnx2x *bp)
5741{
5742
5743#define BNX2X_PCI_ALLOC(x, y, size) \
5744 do { \
5745 x = pci_alloc_consistent(bp->pdev, size, y); \
5746 if (x == NULL) \
5747 goto alloc_mem_err; \
5748 memset(x, 0, size); \
5749 } while (0)
5750
5751#define BNX2X_ALLOC(x, size) \
5752 do { \
5753 x = vmalloc(size); \
5754 if (x == NULL) \
5755 goto alloc_mem_err; \
5756 memset(x, 0, size); \
5757 } while (0)
5758
5759 int i;
5760
5761 /* fastpath */
a2fbb9ea
ET
5762 for_each_queue(bp, i) {
5763 bnx2x_fp(bp, i, bp) = bp;
5764
5765 /* Status blocks */
5766 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5767 &bnx2x_fp(bp, i, status_blk_mapping),
5768 sizeof(struct host_status_block) +
5769 sizeof(struct eth_tx_db_data));
5770
5771 bnx2x_fp(bp, i, hw_tx_prods) =
5772 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5773
5774 bnx2x_fp(bp, i, tx_prods_mapping) =
5775 bnx2x_fp(bp, i, status_blk_mapping) +
5776 sizeof(struct host_status_block);
5777
5778 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5779 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5780 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5781 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5782 &bnx2x_fp(bp, i, tx_desc_mapping),
5783 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5784
5785 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5786 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5787 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5788 &bnx2x_fp(bp, i, rx_desc_mapping),
5789 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5790
5791 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5792 &bnx2x_fp(bp, i, rx_comp_mapping),
5793 sizeof(struct eth_fast_path_rx_cqe) *
5794 NUM_RCQ_BD);
5795
7a9b2557
VZ
5796 /* SGE ring */
5797 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5798 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5799 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5800 &bnx2x_fp(bp, i, rx_sge_mapping),
5801 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea
ET
5802 }
5803 /* end of fastpath */
5804
5805 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5806 sizeof(struct host_def_status_block));
5807
5808 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5809 sizeof(struct bnx2x_slowpath));
5810
5811#ifdef BCM_ISCSI
5812 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5813
5814 /* Initialize T1 */
5815 for (i = 0; i < 64*1024; i += 64) {
5816 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5817 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5818 }
5819
5820 /* allocate searcher T2 table
5821 we allocate 1/4 of alloc num for T2
5822 (which is not entered into the ILT) */
5823 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5824
5825 /* Initialize T2 */
5826 for (i = 0; i < 16*1024; i += 64)
5827 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5828
c14423fe 5829 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5830 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5831
5832 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5833 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5834
5835 /* QM queues (128*MAX_CONN) */
5836 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5837#endif
5838
5839 /* Slow path ring */
5840 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5841
5842 return 0;
5843
5844alloc_mem_err:
5845 bnx2x_free_mem(bp);
5846 return -ENOMEM;
5847
5848#undef BNX2X_PCI_ALLOC
5849#undef BNX2X_ALLOC
5850}
5851
5852static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5853{
5854 int i;
5855
5856 for_each_queue(bp, i) {
5857 struct bnx2x_fastpath *fp = &bp->fp[i];
5858
5859 u16 bd_cons = fp->tx_bd_cons;
5860 u16 sw_prod = fp->tx_pkt_prod;
5861 u16 sw_cons = fp->tx_pkt_cons;
5862
a2fbb9ea
ET
5863 while (sw_cons != sw_prod) {
5864 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5865 sw_cons++;
5866 }
5867 }
5868}
5869
5870static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5871{
5872 int i, j;
5873
5874 for_each_queue(bp, j) {
5875 struct bnx2x_fastpath *fp = &bp->fp[j];
5876
a2fbb9ea
ET
5877 for (i = 0; i < NUM_RX_BD; i++) {
5878 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5879 struct sk_buff *skb = rx_buf->skb;
5880
5881 if (skb == NULL)
5882 continue;
5883
5884 pci_unmap_single(bp->pdev,
5885 pci_unmap_addr(rx_buf, mapping),
5886 bp->rx_buf_use_size,
5887 PCI_DMA_FROMDEVICE);
5888
5889 rx_buf->skb = NULL;
5890 dev_kfree_skb(skb);
5891 }
7a9b2557
VZ
5892 if (!fp->disable_tpa)
5893 bnx2x_free_tpa_pool(bp, fp,
5894 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
5895 }
5896}
5897
5898static void bnx2x_free_skbs(struct bnx2x *bp)
5899{
5900 bnx2x_free_tx_skbs(bp);
5901 bnx2x_free_rx_skbs(bp);
5902}
5903
5904static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5905{
34f80b04 5906 int i, offset = 1;
a2fbb9ea
ET
5907
5908 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5909 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
5910 bp->msix_table[0].vector);
5911
5912 for_each_queue(bp, i) {
c14423fe 5913 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5914 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5915 bnx2x_fp(bp, i, state));
5916
228241eb
ET
5917 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5918 BNX2X_ERR("IRQ of fp #%d being freed while "
5919 "state != closed\n", i);
a2fbb9ea 5920
34f80b04 5921 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5922 }
a2fbb9ea
ET
5923}
5924
5925static void bnx2x_free_irq(struct bnx2x *bp)
5926{
a2fbb9ea 5927 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
5928 bnx2x_free_msix_irqs(bp);
5929 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
5930 bp->flags &= ~USING_MSIX_FLAG;
5931
5932 } else
5933 free_irq(bp->pdev->irq, bp->dev);
5934}
5935
5936static int bnx2x_enable_msix(struct bnx2x *bp)
5937{
34f80b04 5938 int i, rc, offset;
a2fbb9ea
ET
5939
5940 bp->msix_table[0].entry = 0;
34f80b04
EG
5941 offset = 1;
5942 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5943
34f80b04
EG
5944 for_each_queue(bp, i) {
5945 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 5946
34f80b04
EG
5947 bp->msix_table[i + offset].entry = igu_vec;
5948 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5949 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
5950 }
5951
34f80b04
EG
5952 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5953 bp->num_queues + offset);
5954 if (rc) {
5955 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
5956 return -1;
5957 }
a2fbb9ea
ET
5958 bp->flags |= USING_MSIX_FLAG;
5959
5960 return 0;
a2fbb9ea
ET
5961}
5962
a2fbb9ea
ET
5963static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5964{
34f80b04 5965 int i, rc, offset = 1;
a2fbb9ea 5966
a2fbb9ea
ET
5967 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
5968 bp->dev->name, bp->dev);
a2fbb9ea
ET
5969 if (rc) {
5970 BNX2X_ERR("request sp irq failed\n");
5971 return -EBUSY;
5972 }
5973
5974 for_each_queue(bp, i) {
34f80b04 5975 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5976 bnx2x_msix_fp_int, 0,
5977 bp->dev->name, &bp->fp[i]);
a2fbb9ea 5978 if (rc) {
34f80b04
EG
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n",
5980 i + offset, rc);
a2fbb9ea
ET
5981 bnx2x_free_msix_irqs(bp);
5982 return -EBUSY;
5983 }
5984
5985 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
5986 }
5987
5988 return 0;
a2fbb9ea
ET
5989}
5990
5991static int bnx2x_req_irq(struct bnx2x *bp)
5992{
34f80b04 5993 int rc;
a2fbb9ea 5994
34f80b04
EG
5995 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
5996 bp->dev->name, bp->dev);
a2fbb9ea
ET
5997 if (!rc)
5998 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
5999
6000 return rc;
a2fbb9ea
ET
6001}
6002
6003/*
6004 * Init service functions
6005 */
6006
34f80b04 6007static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
a2fbb9ea
ET
6008{
6009 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6010 int port = BP_PORT(bp);
a2fbb9ea
ET
6011
6012 /* CAM allocation
6013 * unicasts 0-31:port0 32-63:port1
6014 * multicast 64-127:port0 128-191:port1
6015 */
6016 config->hdr.length_6b = 2;
34f80b04
EG
6017 config->hdr.offset = port ? 31 : 0;
6018 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6019 config->hdr.reserved1 = 0;
6020
6021 /* primary MAC */
6022 config->config_table[0].cam_entry.msb_mac_addr =
6023 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6024 config->config_table[0].cam_entry.middle_mac_addr =
6025 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6026 config->config_table[0].cam_entry.lsb_mac_addr =
6027 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6028 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
6029 config->config_table[0].target_table_entry.flags = 0;
6030 config->config_table[0].target_table_entry.client_id = 0;
6031 config->config_table[0].target_table_entry.vlan_id = 0;
6032
6033 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6034 config->config_table[0].cam_entry.msb_mac_addr,
6035 config->config_table[0].cam_entry.middle_mac_addr,
6036 config->config_table[0].cam_entry.lsb_mac_addr);
6037
6038 /* broadcast */
6039 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6040 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6041 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6042 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
6043 config->config_table[1].target_table_entry.flags =
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6045 config->config_table[1].target_table_entry.client_id = 0;
6046 config->config_table[1].target_table_entry.vlan_id = 0;
6047
6048 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6049 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6051}
6052
34f80b04
EG
6053static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6054{
6055 struct mac_configuration_cmd_e1h *config =
6056 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6057
6058 if (bp->state != BNX2X_STATE_OPEN) {
6059 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6060 return;
6061 }
6062
6063 /* CAM allocation for E1H
6064 * unicasts: by func number
6065 * multicast: 20+FUNC*20, 20 each
6066 */
6067 config->hdr.length_6b = 1;
6068 config->hdr.offset = BP_FUNC(bp);
6069 config->hdr.client_id = BP_CL_ID(bp);
6070 config->hdr.reserved1 = 0;
6071
6072 /* primary MAC */
6073 config->config_table[0].msb_mac_addr =
6074 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6075 config->config_table[0].middle_mac_addr =
6076 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6077 config->config_table[0].lsb_mac_addr =
6078 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6079 config->config_table[0].client_id = BP_L_ID(bp);
6080 config->config_table[0].vlan_id = 0;
6081 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6082 config->config_table[0].flags = BP_PORT(bp);
6083
6084 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6085 config->config_table[0].msb_mac_addr,
6086 config->config_table[0].middle_mac_addr,
6087 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6088
6089 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6090 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6091 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6092}
6093
a2fbb9ea
ET
6094static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6095 int *state_p, int poll)
6096{
6097 /* can take a while if any port is running */
34f80b04 6098 int cnt = 500;
a2fbb9ea 6099
c14423fe
ET
6100 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6101 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6102
6103 might_sleep();
34f80b04 6104 while (cnt--) {
a2fbb9ea
ET
6105 if (poll) {
6106 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6107 /* if index is different from 0
6108 * the reply for some commands will
a2fbb9ea
ET
6109			 * be on the non-default queue
6110 */
6111 if (idx)
6112 bnx2x_rx_int(&bp->fp[idx], 10);
6113 }
34f80b04 6114 mb(); /* state is changed by bnx2x_sp_event() */
a2fbb9ea 6115
49d66772 6116 if (*state_p == state)
a2fbb9ea
ET
6117 return 0;
6118
a2fbb9ea 6119 msleep(1);
a2fbb9ea
ET
6120 }
6121
a2fbb9ea 6122 /* timeout! */
49d66772
ET
6123 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6124 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6125#ifdef BNX2X_STOP_ON_ERROR
6126 bnx2x_panic();
6127#endif
a2fbb9ea 6128
49d66772 6129 return -EBUSY;
a2fbb9ea
ET
6130}
6131
6132static int bnx2x_setup_leading(struct bnx2x *bp)
6133{
34f80b04 6134 int rc;
a2fbb9ea 6135
c14423fe 6136 /* reset IGU state */
34f80b04 6137 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6138
6139 /* SETUP ramrod */
6140 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6141
34f80b04
EG
6142 /* Wait for completion */
6143 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6144
34f80b04 6145 return rc;
a2fbb9ea
ET
6146}
6147
6148static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6149{
a2fbb9ea 6150 /* reset IGU state */
34f80b04 6151 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6152
228241eb 6153 /* SETUP ramrod */
a2fbb9ea
ET
6154 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6155 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6156
6157 /* Wait for completion */
6158 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6159 &(bp->fp[index].state), 0);
a2fbb9ea
ET
6160}
6161
a2fbb9ea
ET
6162static int bnx2x_poll(struct napi_struct *napi, int budget);
6163static void bnx2x_set_rx_mode(struct net_device *dev);
6164
34f80b04
EG
6165/* must be called with rtnl_lock */
6166static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6167{
228241eb 6168 u32 load_code;
34f80b04
EG
6169 int i, rc;
6170
6171#ifdef BNX2X_STOP_ON_ERROR
6172 if (unlikely(bp->panic))
6173 return -EPERM;
6174#endif
a2fbb9ea
ET
6175
6176 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6177
34f80b04
EG
6178 /* Send LOAD_REQUEST command to MCP
6179 Returns the type of LOAD command:
6180 if it is the first port to be initialized
6181 common blocks should be initialized, otherwise - not
a2fbb9ea 6182 */
34f80b04 6183 if (!BP_NOMCP(bp)) {
228241eb
ET
6184 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6185 if (!load_code) {
6186 BNX2X_ERR("MCP response failure, unloading\n");
6187 return -EBUSY;
6188 }
34f80b04 6189 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6190 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6191
a2fbb9ea 6192 } else {
34f80b04
EG
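		/* no MCP: emulate its bookkeeping - load_count[0] counts all
		   functions, load_count[1 + port] counts per port; the first
		   function to load does COMMON init, the first on a port does
		   PORT init */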
6193 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6194 load_count[0], load_count[1], load_count[2]);
6195 load_count[0]++;
6196 load_count[1 + BP_PORT(bp)]++;
6197 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6198 load_count[0], load_count[1], load_count[2]);
6199 if (load_count[0] == 1)
6200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6201 else if (load_count[1 + BP_PORT(bp)] == 1)
6202 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6203 else
6204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
a2fbb9ea
ET
6205 }
6206
34f80b04
EG
6207 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6208 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6209 bp->port.pmf = 1;
6210 else
6211 bp->port.pmf = 0;
6212 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6213
6214 /* if we can't use MSI-X we only need one fp,
6215 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
6216	 * and fall back to INT#A with one fp
6217 */
34f80b04
EG
6218 if (use_inta) {
6219 bp->num_queues = 1;
6220
6221 } else {
6222 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6223 /* user requested number */
6224 bp->num_queues = use_multi;
6225
6226 else if (use_multi)
6227 bp->num_queues = min_t(u32, num_online_cpus(),
6228 BP_MAX_QUEUES(bp));
6229 else
a2fbb9ea 6230 bp->num_queues = 1;
34f80b04
EG
6231
6232 if (bnx2x_enable_msix(bp)) {
6233 /* failed to enable MSI-X */
6234 bp->num_queues = 1;
6235 if (use_multi)
6236 BNX2X_ERR("Multi requested but failed"
6237 " to enable MSI-X\n");
a2fbb9ea
ET
6238 }
6239 }
34f80b04
EG
6240 DP(NETIF_MSG_IFUP,
6241 "set number of queues to %d\n", bp->num_queues);
c14423fe 6242
a2fbb9ea
ET
6243 if (bnx2x_alloc_mem(bp))
6244 return -ENOMEM;
6245
7a9b2557
VZ
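	/* propagate the global TPA setting to every fastpath queue */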
6246 for_each_queue(bp, i)
6247 bnx2x_fp(bp, i, disable_tpa) =
6248 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6249
34f80b04
EG
6250 /* Disable interrupt handling until HW is initialized */
6251 atomic_set(&bp->intr_sem, 1);
a2fbb9ea 6252
34f80b04
EG
6253 if (bp->flags & USING_MSIX_FLAG) {
6254 rc = bnx2x_req_msix_irqs(bp);
6255 if (rc) {
6256 pci_disable_msix(bp->pdev);
6257 goto load_error;
6258 }
6259 } else {
6260 bnx2x_ack_int(bp);
6261 rc = bnx2x_req_irq(bp);
6262 if (rc) {
6263 BNX2X_ERR("IRQ request failed, aborting\n");
6264 goto load_error;
a2fbb9ea
ET
6265 }
6266 }
6267
6268 for_each_queue(bp, i)
6269 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6270 bnx2x_poll, 128);
6271
a2fbb9ea 6272 /* Initialize HW */
34f80b04
EG
6273 rc = bnx2x_init_hw(bp, load_code);
6274 if (rc) {
a2fbb9ea 6275 BNX2X_ERR("HW init failed, aborting\n");
228241eb 6276 goto load_error;
a2fbb9ea
ET
6277 }
6278
34f80b04 6279 /* Enable interrupt handling */
a2fbb9ea
ET
6280 atomic_set(&bp->intr_sem, 0);
6281
a2fbb9ea
ET
6282 /* Setup NIC internals and enable interrupts */
6283 bnx2x_nic_init(bp);
6284
6285 /* Send LOAD_DONE command to MCP */
34f80b04 6286 if (!BP_NOMCP(bp)) {
228241eb
ET
6287 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6288 if (!load_code) {
a2fbb9ea 6289 BNX2X_ERR("MCP response failure, unloading\n");
34f80b04 6290 rc = -EBUSY;
228241eb 6291 goto load_int_disable;
a2fbb9ea
ET
6292 }
6293 }
6294
bb2a0f7a
YG
6295 bnx2x_stats_init(bp);
6296
a2fbb9ea
ET
6297 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6298
6299 /* Enable Rx interrupt handling before sending the ramrod
6300 as it's completed on Rx FP queue */
6301 for_each_queue(bp, i)
6302 napi_enable(&bnx2x_fp(bp, i, napi));
6303
34f80b04
EG
6304 rc = bnx2x_setup_leading(bp);
6305 if (rc) {
6306#ifdef BNX2X_STOP_ON_ERROR
6307 bp->panic = 1;
6308#endif
228241eb 6309 goto load_stop_netif;
34f80b04 6310 }
a2fbb9ea 6311
34f80b04
EG
6312 if (CHIP_IS_E1H(bp))
6313 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6314 BNX2X_ERR("!!! mf_cfg function disabled\n");
6315 bp->state = BNX2X_STATE_DISABLED;
6316 }
a2fbb9ea 6317
34f80b04
EG
6318 if (bp->state == BNX2X_STATE_OPEN)
6319 for_each_nondefault_queue(bp, i) {
6320 rc = bnx2x_setup_multi(bp, i);
6321 if (rc)
6322 goto load_stop_netif;
6323 }
a2fbb9ea 6324
34f80b04
EG
6325 if (CHIP_IS_E1(bp))
6326 bnx2x_set_mac_addr_e1(bp);
6327 else
6328 bnx2x_set_mac_addr_e1h(bp);
6329
6330 if (bp->port.pmf)
6331 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6332
6333 /* Start fast path */
34f80b04
EG
6334 switch (load_mode) {
6335 case LOAD_NORMAL:
6336 /* Tx queue should be only reenabled */
6337 netif_wake_queue(bp->dev);
6338 bnx2x_set_rx_mode(bp->dev);
6339 break;
6340
6341 case LOAD_OPEN:
6342 /* IRQ is only requested from bnx2x_open */
a2fbb9ea 6343 netif_start_queue(bp->dev);
34f80b04 6344 bnx2x_set_rx_mode(bp->dev);
a2fbb9ea
ET
6345 if (bp->flags & USING_MSIX_FLAG)
6346 printk(KERN_INFO PFX "%s: using MSI-X\n",
6347 bp->dev->name);
34f80b04 6348 break;
a2fbb9ea 6349
34f80b04 6350 case LOAD_DIAG:
a2fbb9ea 6351 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6352 bp->state = BNX2X_STATE_DIAG;
6353 break;
6354
6355 default:
6356 break;
a2fbb9ea
ET
6357 }
6358
34f80b04
EG
6359 if (!bp->port.pmf)
6360 bnx2x__link_status_update(bp);
6361
a2fbb9ea
ET
6362 /* start the timer */
6363 mod_timer(&bp->timer, jiffies + bp->current_interval);
6364
34f80b04 6365
a2fbb9ea
ET
6366 return 0;
6367
228241eb 6368load_stop_netif:
a2fbb9ea
ET
6369 for_each_queue(bp, i)
6370 napi_disable(&bnx2x_fp(bp, i, napi));
6371
228241eb 6372load_int_disable:
615f8fd9 6373 bnx2x_int_disable_sync(bp);
a2fbb9ea 6374
34f80b04 6375 /* Release IRQs */
a2fbb9ea
ET
6376 bnx2x_free_irq(bp);
6377
7a9b2557
VZ
6378 /* Free SKBs, SGEs, TPA pool and driver internals */
6379 bnx2x_free_skbs(bp);
6380 for_each_queue(bp, i)
6381 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6382 RX_SGE_CNT*NUM_RX_SGE_PAGES);
228241eb 6383load_error:
a2fbb9ea
ET
6384 bnx2x_free_mem(bp);
6385
6386 /* TBD we really need to reset the chip
6387 if we want to recover from this */
34f80b04 6388 return rc;
a2fbb9ea
ET
6389}
6390
6391static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6392{
a2fbb9ea
ET
6393 int rc;
6394
c14423fe 6395 /* halt the connection */
a2fbb9ea
ET
6396 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6398
34f80b04 6399 /* Wait for completion */
a2fbb9ea 6400 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6401 &(bp->fp[index].state), 1);
c14423fe 6402 if (rc) /* timeout */
a2fbb9ea
ET
6403 return rc;
6404
6405 /* delete cfc entry */
6406 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6407
34f80b04
EG
6408 /* Wait for completion */
6409 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6410 &(bp->fp[index].state), 1);
6411 return rc;
a2fbb9ea
ET
6412}
6413
a2fbb9ea
ET
6414static void bnx2x_stop_leading(struct bnx2x *bp)
6415{
49d66772 6416 u16 dsb_sp_prod_idx;
c14423fe 6417 /* if the other port is handling traffic,
a2fbb9ea 6418 this can take a lot of time */
34f80b04
EG
6419 int cnt = 500;
6420 int rc;
a2fbb9ea
ET
6421
6422 might_sleep();
6423
6424 /* Send HALT ramrod */
6425 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6426 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6427
34f80b04
EG
6428 /* Wait for completion */
6429 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6430 &(bp->fp[0].state), 1);
6431 if (rc) /* timeout */
a2fbb9ea
ET
6432 return;
6433
49d66772 6434 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6435
228241eb 6436 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6437 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6438
49d66772 6439 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
6440 we are going to reset the chip anyway
6441 so there is not much to do if this times out
6442 */
34f80b04 6443 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
49d66772 6444 msleep(1);
34f80b04
EG
6445 if (!cnt) {
6446 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6447 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6448 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6449#ifdef BNX2X_STOP_ON_ERROR
6450 bnx2x_panic();
6451#endif
6452 break;
6453 }
6454 cnt--;
49d66772
ET
6455 }
6456 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6457 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
6458}
6459
34f80b04
EG
6460static void bnx2x_reset_func(struct bnx2x *bp)
6461{
6462 int port = BP_PORT(bp);
6463 int func = BP_FUNC(bp);
6464 int base, i;
6465
6466 /* Configure IGU */
6467 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6468 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6469
6470 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6471
6472 /* Clear ILT */
6473 base = FUNC_ILT_BASE(func);
6474 for (i = base; i < base + ILT_PER_FUNC; i++)
6475 bnx2x_ilt_wr(bp, i, 0);
6476}
6477
6478static void bnx2x_reset_port(struct bnx2x *bp)
6479{
6480 int port = BP_PORT(bp);
6481 u32 val;
6482
6483 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6484
6485 /* Do not rcv packets to BRB */
6486 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6487 /* Do not direct rcv packets that are not for MCP to the BRB */
6488 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6489 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6490
6491 /* Configure AEU */
6492 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6493
6494 msleep(100);
6495 /* Check for BRB port occupancy */
6496 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6497 if (val)
6498 DP(NETIF_MSG_IFDOWN,
6499 "BRB1 is not empty %d blooks are occupied\n", val);
6500
6501 /* TODO: Close Doorbell port? */
6502}
6503
6504static void bnx2x_reset_common(struct bnx2x *bp)
6505{
6506 /* reset_common */
6507 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6508 0xd3ffff7f);
6509 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6510}
6511
6512static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6513{
6514 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6515 BP_FUNC(bp), reset_code);
6516
6517 switch (reset_code) {
6518 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6519 bnx2x_reset_port(bp);
6520 bnx2x_reset_func(bp);
6521 bnx2x_reset_common(bp);
6522 break;
6523
6524 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6525 bnx2x_reset_port(bp);
6526 bnx2x_reset_func(bp);
6527 break;
6528
6529 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6530 bnx2x_reset_func(bp);
6531 break;
49d66772 6532
34f80b04
EG
6533 default:
6534 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6535 break;
6536 }
6537}
6538
6539/* must be called with rtnl_lock */
6540static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea
ET
6541{
6542 u32 reset_code = 0;
34f80b04 6543 int i, cnt;
a2fbb9ea
ET
6544
6545 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6546
228241eb
ET
6547 bp->rx_mode = BNX2X_RX_MODE_NONE;
6548 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6549
228241eb
ET
6550 if (netif_running(bp->dev)) {
6551 netif_tx_disable(bp->dev);
6552 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6553 }
6554
34f80b04
EG
6555 del_timer_sync(&bp->timer);
6556 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6557 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6558 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6559
228241eb
ET
6560 /* Wait until all fast path tasks complete */
6561 for_each_queue(bp, i) {
6562 struct bnx2x_fastpath *fp = &bp->fp[i];
6563
34f80b04
EG
6564#ifdef BNX2X_STOP_ON_ERROR
6565#ifdef __powerpc64__
6566 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6567#else
6568 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6569#endif
6570 fp->tpa_queue_used);
6571#endif
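		/* give each queue up to ~1 second (1000 x 1ms) to drain its
		   pending work */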
6572 cnt = 1000;
6573 smp_rmb();
6574 while (bnx2x_has_work(fp)) {
228241eb 6575 msleep(1);
34f80b04
EG
6576 if (!cnt) {
6577 BNX2X_ERR("timeout waiting for queue[%d]\n",
6578 i);
6579#ifdef BNX2X_STOP_ON_ERROR
6580 bnx2x_panic();
6581 return -EBUSY;
6582#else
6583 break;
6584#endif
6585 }
6586 cnt--;
6587 smp_rmb();
6588 }
228241eb 6589 }
a2fbb9ea 6590
34f80b04
EG
6591 /* Wait until all slow path tasks complete */
6592 cnt = 1000;
6593 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
a2fbb9ea
ET
6594 msleep(1);
6595
228241eb
ET
6596 for_each_queue(bp, i)
6597 napi_disable(&bnx2x_fp(bp, i, napi));
6598 /* Disable interrupts after Tx and Rx are disabled on stack level */
6599 bnx2x_int_disable_sync(bp);
a2fbb9ea 6600
34f80b04
EG
6601 /* Release IRQs */
6602 bnx2x_free_irq(bp);
6603
a2fbb9ea
ET
6604 if (bp->flags & NO_WOL_FLAG)
6605 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
228241eb 6606
a2fbb9ea 6607 else if (bp->wol) {
34f80b04 6608 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6609 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6610 u32 val;
a2fbb9ea 6611
34f80b04
EG
6612 /* The mac address is written to entries 1-4 to
6613 preserve entry 0 which is used by the PMF */
a2fbb9ea 6614 val = (mac_addr[0] << 8) | mac_addr[1];
34f80b04 6615 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
a2fbb9ea
ET
6616
6617 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6618 (mac_addr[4] << 8) | mac_addr[5];
34f80b04
EG
6619 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6620 val);
a2fbb9ea
ET
6621
6622 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6623
a2fbb9ea
ET
6624 } else
6625 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6626
34f80b04
EG
6627 /* Close multi and leading connections
6628 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6629 for_each_nondefault_queue(bp, i)
6630 if (bnx2x_stop_multi(bp, i))
228241eb 6631 goto unload_error;
a2fbb9ea 6632
34f80b04
EG
6633 if (CHIP_IS_E1H(bp))
6634 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6635
6636 bnx2x_stop_leading(bp);
6637#ifdef BNX2X_STOP_ON_ERROR
6638 /* If ramrod completion timed out - break here! */
6639 if (bp->panic) {
6640 BNX2X_ERR("Stop leading failed!\n");
6641 return -EBUSY;
6642 }
6643#endif
6644
228241eb
ET
6645 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6646 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
34f80b04
EG
6647 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6648 "state 0x%x fp[0].state 0x%x\n",
228241eb
ET
6649 bp->state, bp->fp[0].state);
6650 }
6651
6652unload_error:
34f80b04 6653 if (!BP_NOMCP(bp))
228241eb 6654 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6655 else {
6656 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6657 load_count[0], load_count[1], load_count[2]);
6658 load_count[0]--;
6659 load_count[1 + BP_PORT(bp)]--;
6660 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6661 load_count[0], load_count[1], load_count[2]);
6662 if (load_count[0] == 0)
6663 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6664 else if (load_count[1 + BP_PORT(bp)] == 0)
6665 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6666 else
6667 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6668 }
a2fbb9ea 6669
34f80b04
EG
6670 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6671 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6672 bnx2x__link_reset(bp);
a2fbb9ea
ET
6673
6674 /* Reset the chip */
228241eb 6675 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6676
6677 /* Report UNLOAD_DONE to MCP */
34f80b04 6678 if (!BP_NOMCP(bp))
a2fbb9ea
ET
6679 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6680
7a9b2557 6681 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6682 bnx2x_free_skbs(bp);
7a9b2557
VZ
6683 for_each_queue(bp, i)
6684 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6685 RX_SGE_CNT*NUM_RX_SGE_PAGES);
a2fbb9ea
ET
6686 bnx2x_free_mem(bp);
6687
6688 bp->state = BNX2X_STATE_CLOSED;
228241eb 6689
a2fbb9ea
ET
6690 netif_carrier_off(bp->dev);
6691
6692 return 0;
6693}
6694
34f80b04
EG
6695static void bnx2x_reset_task(struct work_struct *work)
6696{
6697 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6698
6699#ifdef BNX2X_STOP_ON_ERROR
6700 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6701 " so reset not done to allow debug dump,\n"
6702 KERN_ERR " you will need to reboot when done\n");
6703 return;
6704#endif
6705
6706 rtnl_lock();
6707
6708 if (!netif_running(bp->dev))
6709 goto reset_task_exit;
6710
6711 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6712 bnx2x_nic_load(bp, LOAD_NORMAL);
6713
6714reset_task_exit:
6715 rtnl_unlock();
6716}
6717
a2fbb9ea
ET
6718/* end of nic load/unload */
6719
6720/* ethtool_ops */
6721
6722/*
6723 * Init service functions
6724 */
6725
34f80b04
EG
6726static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6727{
6728 u32 val;
6729
6730 /* Check if there is any driver already loaded */
6731 val = REG_RD(bp, MISC_REG_UNPREPARED);
6732 if (val == 0x1) {
6733 /* Check if it is the UNDI driver
6734 * UNDI driver initializes CID offset for normal bell to 0x7
6735 */
6736 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6737 if (val == 0x7) {
6738 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6739 /* save our func and fw_seq */
6740 int func = BP_FUNC(bp);
6741 u16 fw_seq = bp->fw_seq;
6742
6743 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6744
6745 /* try unload UNDI on port 0 */
6746 bp->func = 0;
6747 bp->fw_seq = (SHMEM_RD(bp,
6748 func_mb[bp->func].drv_mb_header) &
6749 DRV_MSG_SEQ_NUMBER_MASK);
6750
6751 reset_code = bnx2x_fw_command(bp, reset_code);
6752 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6753
6754 /* if UNDI is loaded on the other port */
6755 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6756
6757 bp->func = 1;
6758 bp->fw_seq = (SHMEM_RD(bp,
6759 func_mb[bp->func].drv_mb_header) &
6760 DRV_MSG_SEQ_NUMBER_MASK);
6761
6762 bnx2x_fw_command(bp,
6763 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6764 bnx2x_fw_command(bp,
6765 DRV_MSG_CODE_UNLOAD_DONE);
6766
6767 /* restore our func and fw_seq */
6768 bp->func = func;
6769 bp->fw_seq = fw_seq;
6770 }
6771
6772 /* reset device */
6773 REG_WR(bp,
6774 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6775 0xd3ffff7f);
6776 REG_WR(bp,
6777 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6778 0x1403);
6779 }
6780 }
6781}
6782
6783static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6784{
6785 u32 val, val2, val3, val4, id;
6786
6787 /* Get the chip revision id and number. */
6788 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6789 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6790 id = ((val & 0xffff) << 16);
6791 val = REG_RD(bp, MISC_REG_CHIP_REV);
6792 id |= ((val & 0xf) << 12);
6793 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6794 id |= ((val & 0xff) << 4);
6795	val = REG_RD(bp, MISC_REG_BOND_ID);
6796 id |= (val & 0xf);
6797 bp->common.chip_id = id;
6798 bp->link_params.chip_id = bp->common.chip_id;
6799 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6800
6801 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6802 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6803 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6804 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6805 bp->common.flash_size, bp->common.flash_size);
6806
6807 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6808 bp->link_params.shmem_base = bp->common.shmem_base;
6809 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6810
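	/* the shared memory block is expected in the 0xA0000-0xBFFFF window;
	   anything else means the MCP is not running */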
6811 if (!bp->common.shmem_base ||
6812 (bp->common.shmem_base < 0xA0000) ||
6813 (bp->common.shmem_base >= 0xC0000)) {
6814 BNX2X_DEV_INFO("MCP not active\n");
6815 bp->flags |= NO_MCP_FLAG;
6816 return;
6817 }
6818
6819 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6820 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6821 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6822 BNX2X_ERR("BAD MCP validity signature\n");
6823
6824 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6825 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6826
6827 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6828 bp->common.hw_config, bp->common.board);
6829
6830 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6831 SHARED_HW_CFG_LED_MODE_MASK) >>
6832 SHARED_HW_CFG_LED_MODE_SHIFT);
6833
6834 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6835 bp->common.bc_ver = val;
6836 BNX2X_DEV_INFO("bc_ver %X\n", val);
6837 if (val < BNX2X_BC_VER) {
6838 /* for now only warn
6839 * later we might need to enforce this */
6840 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6841 " please upgrade BC\n", BNX2X_BC_VER, val);
6842 }
6843 BNX2X_DEV_INFO("%sWoL Capable\n",
6844 (bp->flags & NO_WOL_FLAG)? "Not " : "");
6845
6846 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6847 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6848 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6849 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6850
6851 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6852 val, val2, val3, val4);
6853}
6854
6855static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6856 u32 switch_cfg)
a2fbb9ea 6857{
34f80b04 6858 int port = BP_PORT(bp);
a2fbb9ea
ET
6859 u32 ext_phy_type;
6860
a2fbb9ea
ET
6861 switch (switch_cfg) {
6862 case SWITCH_CFG_1G:
6863 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6864
c18487ee
YR
6865 ext_phy_type =
6866 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6867 switch (ext_phy_type) {
6868 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6869 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6870 ext_phy_type);
6871
34f80b04
EG
6872 bp->port.supported |= (SUPPORTED_10baseT_Half |
6873 SUPPORTED_10baseT_Full |
6874 SUPPORTED_100baseT_Half |
6875 SUPPORTED_100baseT_Full |
6876 SUPPORTED_1000baseT_Full |
6877 SUPPORTED_2500baseX_Full |
6878 SUPPORTED_TP |
6879 SUPPORTED_FIBRE |
6880 SUPPORTED_Autoneg |
6881 SUPPORTED_Pause |
6882 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6883 break;
6884
6885 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6886 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6887 ext_phy_type);
6888
34f80b04
EG
6889 bp->port.supported |= (SUPPORTED_10baseT_Half |
6890 SUPPORTED_10baseT_Full |
6891 SUPPORTED_100baseT_Half |
6892 SUPPORTED_100baseT_Full |
6893 SUPPORTED_1000baseT_Full |
6894 SUPPORTED_TP |
6895 SUPPORTED_FIBRE |
6896 SUPPORTED_Autoneg |
6897 SUPPORTED_Pause |
6898 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6899 break;
6900
6901 default:
6902 BNX2X_ERR("NVRAM config error. "
6903 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6904 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6905 return;
6906 }
6907
34f80b04
EG
6908 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6909 port*0x10);
6910 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6911 break;
6912
6913 case SWITCH_CFG_10G:
6914 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6915
c18487ee
YR
6916 ext_phy_type =
6917 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6918 switch (ext_phy_type) {
6919 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6920 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6921 ext_phy_type);
6922
34f80b04
EG
6923 bp->port.supported |= (SUPPORTED_10baseT_Half |
6924 SUPPORTED_10baseT_Full |
6925 SUPPORTED_100baseT_Half |
6926 SUPPORTED_100baseT_Full |
6927 SUPPORTED_1000baseT_Full |
6928 SUPPORTED_2500baseX_Full |
6929 SUPPORTED_10000baseT_Full |
6930 SUPPORTED_TP |
6931 SUPPORTED_FIBRE |
6932 SUPPORTED_Autoneg |
6933 SUPPORTED_Pause |
6934 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6935 break;
6936
6937 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 6938 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 6939 ext_phy_type);
f1410647 6940
34f80b04
EG
6941 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6942 SUPPORTED_FIBRE |
6943 SUPPORTED_Pause |
6944 SUPPORTED_Asym_Pause);
f1410647
ET
6945 break;
6946
a2fbb9ea 6947 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
6948 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6949 ext_phy_type);
6950
34f80b04
EG
6951 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6952 SUPPORTED_1000baseT_Full |
6953 SUPPORTED_FIBRE |
6954 SUPPORTED_Pause |
6955 SUPPORTED_Asym_Pause);
f1410647
ET
6956 break;
6957
6958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6959 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
6960 ext_phy_type);
6961
34f80b04
EG
6962 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6963 SUPPORTED_1000baseT_Full |
6964 SUPPORTED_FIBRE |
6965 SUPPORTED_Autoneg |
6966 SUPPORTED_Pause |
6967 SUPPORTED_Asym_Pause);
f1410647
ET
6968 break;
6969
c18487ee
YR
6970 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6971 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6972 ext_phy_type);
6973
34f80b04
EG
6974 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6975 SUPPORTED_2500baseX_Full |
6976 SUPPORTED_1000baseT_Full |
6977 SUPPORTED_FIBRE |
6978 SUPPORTED_Autoneg |
6979 SUPPORTED_Pause |
6980 SUPPORTED_Asym_Pause);
c18487ee
YR
6981 break;
6982
f1410647
ET
6983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6984 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6985 ext_phy_type);
6986
34f80b04
EG
6987 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6988 SUPPORTED_TP |
6989 SUPPORTED_Autoneg |
6990 SUPPORTED_Pause |
6991 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6992 break;
6993
c18487ee
YR
6994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6995 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6996 bp->link_params.ext_phy_config);
6997 break;
6998
a2fbb9ea
ET
6999 default:
7000 BNX2X_ERR("NVRAM config error. "
7001 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7002 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7003 return;
7004 }
7005
34f80b04
EG
7006 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7007 port*0x18);
7008 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7009
a2fbb9ea
ET
7010 break;
7011
7012 default:
7013 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7014 bp->port.link_config);
a2fbb9ea
ET
7015 return;
7016 }
34f80b04 7017 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7018
7019 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7020 if (!(bp->link_params.speed_cap_mask &
7021 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7022 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7023
c18487ee
YR
7024 if (!(bp->link_params.speed_cap_mask &
7025 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7026 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7027
c18487ee
YR
7028 if (!(bp->link_params.speed_cap_mask &
7029 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7030 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7031
c18487ee
YR
7032 if (!(bp->link_params.speed_cap_mask &
7033 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7034 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7035
c18487ee
YR
7036 if (!(bp->link_params.speed_cap_mask &
7037 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7038 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7039 SUPPORTED_1000baseT_Full);
a2fbb9ea 7040
c18487ee
YR
7041 if (!(bp->link_params.speed_cap_mask &
7042 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7043 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7044
c18487ee
YR
7045 if (!(bp->link_params.speed_cap_mask &
7046 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7047 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7048
34f80b04 7049 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7050}
7051
34f80b04 7052static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7053{
c18487ee 7054 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7055
34f80b04 7056 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7057 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7058 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7059 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7060 bp->port.advertising = bp->port.supported;
a2fbb9ea 7061 } else {
c18487ee
YR
7062 u32 ext_phy_type =
7063 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7064
7065 if ((ext_phy_type ==
7066 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7067 (ext_phy_type ==
7068 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7069 /* force 10G, no AN */
c18487ee 7070 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7071 bp->port.advertising =
a2fbb9ea
ET
7072 (ADVERTISED_10000baseT_Full |
7073 ADVERTISED_FIBRE);
7074 break;
7075 }
7076 BNX2X_ERR("NVRAM config error. "
7077 "Invalid link_config 0x%x"
7078 " Autoneg not supported\n",
34f80b04 7079 bp->port.link_config);
a2fbb9ea
ET
7080 return;
7081 }
7082 break;
7083
7084 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7085 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7086 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7087 bp->port.advertising = (ADVERTISED_10baseT_Full |
7088 ADVERTISED_TP);
a2fbb9ea
ET
7089 } else {
7090 BNX2X_ERR("NVRAM config error. "
7091 "Invalid link_config 0x%x"
7092 " speed_cap_mask 0x%x\n",
34f80b04 7093 bp->port.link_config,
c18487ee 7094 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7095 return;
7096 }
7097 break;
7098
7099 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7100 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7101 bp->link_params.req_line_speed = SPEED_10;
7102 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7103 bp->port.advertising = (ADVERTISED_10baseT_Half |
7104 ADVERTISED_TP);
a2fbb9ea
ET
7105 } else {
7106 BNX2X_ERR("NVRAM config error. "
7107 "Invalid link_config 0x%x"
7108 " speed_cap_mask 0x%x\n",
34f80b04 7109 bp->port.link_config,
c18487ee 7110 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7111 return;
7112 }
7113 break;
7114
7115 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7116 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7117 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7118 bp->port.advertising = (ADVERTISED_100baseT_Full |
7119 ADVERTISED_TP);
a2fbb9ea
ET
7120 } else {
7121 BNX2X_ERR("NVRAM config error. "
7122 "Invalid link_config 0x%x"
7123 " speed_cap_mask 0x%x\n",
34f80b04 7124 bp->port.link_config,
c18487ee 7125 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7126 return;
7127 }
7128 break;
7129
7130 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7131 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7132 bp->link_params.req_line_speed = SPEED_100;
7133 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7134 bp->port.advertising = (ADVERTISED_100baseT_Half |
7135 ADVERTISED_TP);
a2fbb9ea
ET
7136 } else {
7137 BNX2X_ERR("NVRAM config error. "
7138 "Invalid link_config 0x%x"
7139 " speed_cap_mask 0x%x\n",
34f80b04 7140 bp->port.link_config,
c18487ee 7141 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7142 return;
7143 }
7144 break;
7145
7146 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7147 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7148 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7149 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7150 ADVERTISED_TP);
a2fbb9ea
ET
7151 } else {
7152 BNX2X_ERR("NVRAM config error. "
7153 "Invalid link_config 0x%x"
7154 " speed_cap_mask 0x%x\n",
34f80b04 7155 bp->port.link_config,
c18487ee 7156 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7157 return;
7158 }
7159 break;
7160
7161 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7162 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7163 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7164 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7165 ADVERTISED_TP);
a2fbb9ea
ET
7166 } else {
7167 BNX2X_ERR("NVRAM config error. "
7168 "Invalid link_config 0x%x"
7169 " speed_cap_mask 0x%x\n",
34f80b04 7170 bp->port.link_config,
c18487ee 7171 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7172 return;
7173 }
7174 break;
7175
7176 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7177 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7178 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7179 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7180 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7181 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7182 ADVERTISED_FIBRE);
a2fbb9ea
ET
7183 } else {
7184 BNX2X_ERR("NVRAM config error. "
7185 "Invalid link_config 0x%x"
7186 " speed_cap_mask 0x%x\n",
34f80b04 7187 bp->port.link_config,
c18487ee 7188 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7189 return;
7190 }
7191 break;
7192
7193 default:
7194 BNX2X_ERR("NVRAM config error. "
7195 "BAD link speed link_config 0x%x\n",
34f80b04 7196 bp->port.link_config);
c18487ee 7197 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7198 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7199 break;
7200 }
a2fbb9ea 7201
34f80b04
EG
7202 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7203 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7204 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
34f80b04 7205 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7206 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7207
c18487ee 7208 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7209 " advertising 0x%x\n",
c18487ee
YR
7210 bp->link_params.req_line_speed,
7211 bp->link_params.req_duplex,
34f80b04 7212 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7213}
7214
34f80b04 7215static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7216{
34f80b04
EG
7217 int port = BP_PORT(bp);
7218 u32 val, val2;
a2fbb9ea 7219
c18487ee 7220 bp->link_params.bp = bp;
34f80b04 7221 bp->link_params.port = port;
c18487ee 7222
c18487ee 7223 bp->link_params.serdes_config =
f1410647 7224 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7225 bp->link_params.lane_config =
a2fbb9ea 7226 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7227 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7228 SHMEM_RD(bp,
7229 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7230 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7231 SHMEM_RD(bp,
7232 dev_info.port_hw_config[port].speed_capability_mask);
7233
34f80b04 7234 bp->port.link_config =
a2fbb9ea
ET
7235 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7236
34f80b04
EG
7237 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7238 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7239 " link_config 0x%08x\n",
c18487ee
YR
7240 bp->link_params.serdes_config,
7241 bp->link_params.lane_config,
7242 bp->link_params.ext_phy_config,
34f80b04 7243 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7244
34f80b04 7245 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7246 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7247 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7248
7249 bnx2x_link_settings_requested(bp);
7250
7251 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7252 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7253 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7254 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7255 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7256 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7257 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7258 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7259 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7260 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7261}
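The port MAC address arrives from shared memory as two 32-bit words: mac_upper carries bytes 0-1 and mac_lower bytes 2-5, exactly as unpacked above into dev_addr[]. A self-contained sketch of that unpacking; the sample register values are made up:

#include <stdio.h>

/* Same byte extraction as bnx2x_get_port_hwinfo(). */
static void unpack_mac(unsigned int upper, unsigned int lower,
		       unsigned char mac[6])
{
	mac[0] = (upper >> 8) & 0xff;
	mac[1] = upper & 0xff;
	mac[2] = (lower >> 24) & 0xff;
	mac[3] = (lower >> 16) & 0xff;
	mac[4] = (lower >> 8) & 0xff;
	mac[5] = lower & 0xff;
}

int main(void)
{
	unsigned char mac[6];

	unpack_mac(0x0010, 0x18421234, mac);	/* hypothetical shmem words */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}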
7262
7263static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7264{
7265 int func = BP_FUNC(bp);
7266 u32 val, val2;
7267 int rc = 0;
a2fbb9ea 7268
34f80b04 7269 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7270
34f80b04
EG
7271 bp->e1hov = 0;
7272 bp->e1hmf = 0;
7273 if (CHIP_IS_E1H(bp)) {
7274 bp->mf_config =
7275 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7276
34f80b04
EG
7277 val =
7278 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7279 FUNC_MF_CFG_E1HOV_TAG_MASK);
7280 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7281
34f80b04
EG
7282 bp->e1hov = val;
7283 bp->e1hmf = 1;
7284 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7285 "(0x%04x)\n",
7286 func, bp->e1hov, bp->e1hov);
7287 } else {
7288 BNX2X_DEV_INFO("Single function mode\n");
7289 if (BP_E1HVN(bp)) {
7290 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7291 " aborting\n", func);
7292 rc = -EPERM;
7293 }
7294 }
7295 }
a2fbb9ea 7296
34f80b04
EG
7297 if (!BP_NOMCP(bp)) {
7298 bnx2x_get_port_hwinfo(bp);
7299
7300 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7301 DRV_MSG_SEQ_NUMBER_MASK);
7302 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7303 }
7304
7305 if (IS_E1HMF(bp)) {
7306 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7307 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7308 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7309 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7310 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7311 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7312 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7313 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7314 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7315 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7316 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7317 ETH_ALEN);
7318 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7319 ETH_ALEN);
a2fbb9ea 7320 }
34f80b04
EG
7321
7322 return rc;
a2fbb9ea
ET
7323 }
7324
34f80b04
EG
7325 if (BP_NOMCP(bp)) {
7326 /* only supposed to happen on emulation/FPGA */
7327 BNX2X_ERR("warning rendom MAC workaround active\n");
7328 random_ether_addr(bp->dev->dev_addr);
7329 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7330 }
a2fbb9ea 7331
34f80b04
EG
7332 return rc;
7333}
7334
7335static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7336{
7337 int func = BP_FUNC(bp);
7338 int rc;
7339
7340 if (nomcp)
7341 bp->flags |= NO_MCP_FLAG;
a2fbb9ea 7342
34f80b04 7343 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7344
34f80b04
EG
7345 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7346 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7347
7348 rc = bnx2x_get_hwinfo(bp);
7349
7350 /* need to reset chip if undi was active */
7351 if (!BP_NOMCP(bp))
7352 bnx2x_undi_unload(bp);
7353
7354 if (CHIP_REV_IS_FPGA(bp))
7355 printk(KERN_ERR PFX "FPGA detected\n");
7356
7357 if (BP_NOMCP(bp) && (func == 0))
7358 printk(KERN_ERR PFX
7359 "MCP disabled, must load devices in order!\n");
7360
7a9b2557
VZ
7361 /* Set TPA flags */
7362 if (disable_tpa) {
7363 bp->flags &= ~TPA_ENABLE_FLAG;
7364 bp->dev->features &= ~NETIF_F_LRO;
7365 } else {
7366 bp->flags |= TPA_ENABLE_FLAG;
7367 bp->dev->features |= NETIF_F_LRO;
7368 }
7369
7370
34f80b04
EG
7371 bp->tx_ring_size = MAX_TX_AVAIL;
7372 bp->rx_ring_size = MAX_RX_AVAIL;
7373
7374 bp->rx_csum = 1;
7375 bp->rx_offset = 0;
7376
7377 bp->tx_ticks = 50;
7378 bp->rx_ticks = 25;
7379
7380 bp->stats_ticks = 1000000 & 0xffff00;
7381
7382 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7383 bp->current_interval = (poll ? poll : bp->timer_interval);
7384
7385 init_timer(&bp->timer);
7386 bp->timer.expires = jiffies + bp->current_interval;
7387 bp->timer.data = (unsigned long) bp;
7388 bp->timer.function = bnx2x_timer;
7389
7390 return rc;
a2fbb9ea
ET
7391}
7392
7393/*
7394 * ethtool service functions
7395 */
7396
7397/* All ethtool functions called with rtnl_lock */
7398
7399static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7400{
7401 struct bnx2x *bp = netdev_priv(dev);
7402
34f80b04
EG
7403 cmd->supported = bp->port.supported;
7404 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7405
7406 if (netif_carrier_ok(dev)) {
c18487ee
YR
7407 cmd->speed = bp->link_vars.line_speed;
7408 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7409 } else {
c18487ee
YR
7410 cmd->speed = bp->link_params.req_line_speed;
7411 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7412 }
34f80b04
EG
7413 if (IS_E1HMF(bp)) {
7414 u16 vn_max_rate;
7415
7416 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7417 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7418 if (vn_max_rate < cmd->speed)
7419 cmd->speed = vn_max_rate;
7420 }
a2fbb9ea 7421
c18487ee
YR
7422 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7423 u32 ext_phy_type =
7424 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7425
7426 switch (ext_phy_type) {
7427 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7428 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7429 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7431 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7432 cmd->port = PORT_FIBRE;
7433 break;
7434
7435 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7436 cmd->port = PORT_TP;
7437 break;
7438
c18487ee
YR
7439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7440 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7441 bp->link_params.ext_phy_config);
7442 break;
7443
f1410647
ET
7444 default:
7445 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7446 bp->link_params.ext_phy_config);
7447 break;
f1410647
ET
7448 }
7449 } else
a2fbb9ea 7450 cmd->port = PORT_TP;
a2fbb9ea 7451
34f80b04 7452 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7453 cmd->transceiver = XCVR_INTERNAL;
7454
c18487ee 7455 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7456 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7457 else
a2fbb9ea 7458 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7459
7460 cmd->maxtxpkt = 0;
7461 cmd->maxrxpkt = 0;
7462
7463 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7464 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7465 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7466 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7467 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7468 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7469 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7470
7471 return 0;
7472}
7473
7474static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7475{
7476 struct bnx2x *bp = netdev_priv(dev);
7477 u32 advertising;
7478
34f80b04
EG
7479 if (IS_E1HMF(bp))
7480 return 0;
7481
a2fbb9ea
ET
7482 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7483 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7484 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7485 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7486 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7487 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7488 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7489
a2fbb9ea 7490 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7491 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7492 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7493 return -EINVAL;
f1410647 7494 }
a2fbb9ea
ET
7495
7496 /* advertise the requested speed and duplex if supported */
34f80b04 7497 cmd->advertising &= bp->port.supported;
a2fbb9ea 7498
c18487ee
YR
7499 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7500 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7501 bp->port.advertising |= (ADVERTISED_Autoneg |
7502 cmd->advertising);
a2fbb9ea
ET
7503
7504 } else { /* forced speed */
7505 /* advertise the requested speed and duplex if supported */
7506 switch (cmd->speed) {
7507 case SPEED_10:
7508 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7509 if (!(bp->port.supported &
f1410647
ET
7510 SUPPORTED_10baseT_Full)) {
7511 DP(NETIF_MSG_LINK,
7512 "10M full not supported\n");
a2fbb9ea 7513 return -EINVAL;
f1410647 7514 }
a2fbb9ea
ET
7515
7516 advertising = (ADVERTISED_10baseT_Full |
7517 ADVERTISED_TP);
7518 } else {
34f80b04 7519 if (!(bp->port.supported &
f1410647
ET
7520 SUPPORTED_10baseT_Half)) {
7521 DP(NETIF_MSG_LINK,
7522 "10M half not supported\n");
a2fbb9ea 7523 return -EINVAL;
f1410647 7524 }
a2fbb9ea
ET
7525
7526 advertising = (ADVERTISED_10baseT_Half |
7527 ADVERTISED_TP);
7528 }
7529 break;
7530
7531 case SPEED_100:
7532 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7533 if (!(bp->port.supported &
f1410647
ET
7534 SUPPORTED_100baseT_Full)) {
7535 DP(NETIF_MSG_LINK,
7536 "100M full not supported\n");
a2fbb9ea 7537 return -EINVAL;
f1410647 7538 }
a2fbb9ea
ET
7539
7540 advertising = (ADVERTISED_100baseT_Full |
7541 ADVERTISED_TP);
7542 } else {
34f80b04 7543 if (!(bp->port.supported &
f1410647
ET
7544 SUPPORTED_100baseT_Half)) {
7545 DP(NETIF_MSG_LINK,
7546 "100M half not supported\n");
a2fbb9ea 7547 return -EINVAL;
f1410647 7548 }
a2fbb9ea
ET
7549
7550 advertising = (ADVERTISED_100baseT_Half |
7551 ADVERTISED_TP);
7552 }
7553 break;
7554
7555 case SPEED_1000:
f1410647
ET
7556 if (cmd->duplex != DUPLEX_FULL) {
7557 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7558 return -EINVAL;
f1410647 7559 }
a2fbb9ea 7560
34f80b04 7561 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7562 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7563 return -EINVAL;
f1410647 7564 }
a2fbb9ea
ET
7565
7566 advertising = (ADVERTISED_1000baseT_Full |
7567 ADVERTISED_TP);
7568 break;
7569
7570 case SPEED_2500:
f1410647
ET
7571 if (cmd->duplex != DUPLEX_FULL) {
7572 DP(NETIF_MSG_LINK,
7573 "2.5G half not supported\n");
a2fbb9ea 7574 return -EINVAL;
f1410647 7575 }
a2fbb9ea 7576
34f80b04 7577 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7578 DP(NETIF_MSG_LINK,
7579 "2.5G full not supported\n");
a2fbb9ea 7580 return -EINVAL;
f1410647 7581 }
a2fbb9ea 7582
f1410647 7583 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7584 ADVERTISED_TP);
7585 break;
7586
7587 case SPEED_10000:
f1410647
ET
7588 if (cmd->duplex != DUPLEX_FULL) {
7589 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7590 return -EINVAL;
f1410647 7591 }
a2fbb9ea 7592
34f80b04 7593 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7594 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7595 return -EINVAL;
f1410647 7596 }
a2fbb9ea
ET
7597
7598 advertising = (ADVERTISED_10000baseT_Full |
7599 ADVERTISED_FIBRE);
7600 break;
7601
7602 default:
f1410647 7603 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7604 return -EINVAL;
7605 }
7606
c18487ee
YR
7607 bp->link_params.req_line_speed = cmd->speed;
7608 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7609 bp->port.advertising = advertising;
a2fbb9ea
ET
7610 }
7611
c18487ee 7612 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7613 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7614 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7615 bp->port.advertising);
a2fbb9ea 7616
34f80b04 7617 if (netif_running(dev)) {
bb2a0f7a 7618 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7619 bnx2x_link_set(bp);
7620 }
a2fbb9ea
ET
7621
7622 return 0;
7623}
7624
c18487ee
YR
7625#define PHY_FW_VER_LEN 10
7626
a2fbb9ea
ET
7627static void bnx2x_get_drvinfo(struct net_device *dev,
7628 struct ethtool_drvinfo *info)
7629{
7630 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7631 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7632
7633 strcpy(info->driver, DRV_MODULE_NAME);
7634 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7635
7636 phy_fw_ver[0] = '\0';
34f80b04
EG
7637 if (bp->port.pmf) {
7638 bnx2x_phy_hw_lock(bp);
7639 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7640 (bp->state != BNX2X_STATE_CLOSED),
7641 phy_fw_ver, PHY_FW_VER_LEN);
7642 bnx2x_phy_hw_unlock(bp);
7643 }
c18487ee
YR
7644
7645 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7646 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7647 BCM_5710_FW_REVISION_VERSION,
34f80b04 7648 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7649 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
a2fbb9ea
ET
7650 strcpy(info->bus_info, pci_name(bp->pdev));
7651 info->n_stats = BNX2X_NUM_STATS;
7652 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7653 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7654 info->regdump_len = 0;
7655}
7656
7657static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7658{
7659 struct bnx2x *bp = netdev_priv(dev);
7660
7661 if (bp->flags & NO_WOL_FLAG) {
7662 wol->supported = 0;
7663 wol->wolopts = 0;
7664 } else {
7665 wol->supported = WAKE_MAGIC;
7666 if (bp->wol)
7667 wol->wolopts = WAKE_MAGIC;
7668 else
7669 wol->wolopts = 0;
7670 }
7671 memset(&wol->sopass, 0, sizeof(wol->sopass));
7672}
7673
7674static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7675{
7676 struct bnx2x *bp = netdev_priv(dev);
7677
7678 if (wol->wolopts & ~WAKE_MAGIC)
7679 return -EINVAL;
7680
7681 if (wol->wolopts & WAKE_MAGIC) {
7682 if (bp->flags & NO_WOL_FLAG)
7683 return -EINVAL;
7684
7685 bp->wol = 1;
34f80b04 7686 } else
a2fbb9ea 7687 bp->wol = 0;
34f80b04 7688
a2fbb9ea
ET
7689 return 0;
7690}
7691
7692static u32 bnx2x_get_msglevel(struct net_device *dev)
7693{
7694 struct bnx2x *bp = netdev_priv(dev);
7695
7696 return bp->msglevel;
7697}
7698
7699static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7700{
7701 struct bnx2x *bp = netdev_priv(dev);
7702
7703 if (capable(CAP_NET_ADMIN))
7704 bp->msglevel = level;
7705}
7706
7707static int bnx2x_nway_reset(struct net_device *dev)
7708{
7709 struct bnx2x *bp = netdev_priv(dev);
7710
34f80b04
EG
7711 if (!bp->port.pmf)
7712 return 0;
a2fbb9ea 7713
34f80b04 7714 if (netif_running(dev)) {
bb2a0f7a 7715 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7716 bnx2x_link_set(bp);
7717 }
a2fbb9ea
ET
7718
7719 return 0;
7720}
7721
7722static int bnx2x_get_eeprom_len(struct net_device *dev)
7723{
7724 struct bnx2x *bp = netdev_priv(dev);
7725
34f80b04 7726 return bp->common.flash_size;
a2fbb9ea
ET
7727}
7728
7729static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7730{
34f80b04 7731 int port = BP_PORT(bp);
a2fbb9ea
ET
7732 int count, i;
7733 u32 val = 0;
7734
7735 /* adjust timeout for emulation/FPGA */
7736 count = NVRAM_TIMEOUT_COUNT;
7737 if (CHIP_REV_IS_SLOW(bp))
7738 count *= 100;
7739
7740 /* request access to nvram interface */
7741 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7742 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7743
7744 for (i = 0; i < count*10; i++) {
7745 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7746 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7747 break;
7748
7749 udelay(5);
7750 }
7751
7752 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7753 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7754 return -EBUSY;
7755 }
7756
7757 return 0;
7758}
7759
7760static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7761{
34f80b04 7762 int port = BP_PORT(bp);
a2fbb9ea
ET
7763 int count, i;
7764 u32 val = 0;
7765
7766 /* adjust timeout for emulation/FPGA */
7767 count = NVRAM_TIMEOUT_COUNT;
7768 if (CHIP_REV_IS_SLOW(bp))
7769 count *= 100;
7770
7771 /* relinquish nvram interface */
7772 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7773 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7774
7775 for (i = 0; i < count*10; i++) {
7776 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7777 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7778 break;
7779
7780 udelay(5);
7781 }
7782
7783 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7784 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7785 return -EBUSY;
7786 }
7787
7788 return 0;
7789}
7790
7791static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7792{
7793 u32 val;
7794
7795 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7796
7797 /* enable both bits, even on read */
7798 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7799 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7800 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7801}
7802
7803static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7804{
7805 u32 val;
7806
7807 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7808
7809 /* disable both bits, even after read */
7810 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7811 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7812 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7813}
7814
7815static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7816 u32 cmd_flags)
7817{
f1410647 7818 int count, i, rc;
a2fbb9ea
ET
7819 u32 val;
7820
7821 /* build the command word */
7822 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7823
7824 /* need to clear DONE bit separately */
7825 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7826
7827 /* address of the NVRAM to read from */
7828 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7829 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7830
7831 /* issue a read command */
7832 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7833
7834 /* adjust timeout for emulation/FPGA */
7835 count = NVRAM_TIMEOUT_COUNT;
7836 if (CHIP_REV_IS_SLOW(bp))
7837 count *= 100;
7838
7839 /* wait for completion */
7840 *ret_val = 0;
7841 rc = -EBUSY;
7842 for (i = 0; i < count; i++) {
7843 udelay(5);
7844 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7845
7846 if (val & MCPR_NVM_COMMAND_DONE) {
7847 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 7848 /* we read nvram data in cpu order,
 7849 * but ethtool sees it as an array of bytes;
 7850 * converting to big-endian does the work */
7851 val = cpu_to_be32(val);
7852 *ret_val = val;
7853 rc = 0;
7854 break;
7855 }
7856 }
7857
7858 return rc;
7859}
7860
7861static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7862 int buf_size)
7863{
7864 int rc;
7865 u32 cmd_flags;
7866 u32 val;
7867
7868 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7869 DP(BNX2X_MSG_NVM,
c14423fe 7870 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
7871 offset, buf_size);
7872 return -EINVAL;
7873 }
7874
34f80b04
EG
7875 if (offset + buf_size > bp->common.flash_size) {
7876 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7877 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7878 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7879 return -EINVAL;
7880 }
7881
7882 /* request access to nvram interface */
7883 rc = bnx2x_acquire_nvram_lock(bp);
7884 if (rc)
7885 return rc;
7886
7887 /* enable access to nvram interface */
7888 bnx2x_enable_nvram_access(bp);
7889
7890 /* read the first word(s) */
7891 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7892 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7893 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7894 memcpy(ret_buf, &val, 4);
7895
7896 /* advance to the next dword */
7897 offset += sizeof(u32);
7898 ret_buf += sizeof(u32);
7899 buf_size -= sizeof(u32);
7900 cmd_flags = 0;
7901 }
7902
7903 if (rc == 0) {
7904 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7905 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7906 memcpy(ret_buf, &val, 4);
7907 }
7908
7909 /* disable access to nvram interface */
7910 bnx2x_disable_nvram_access(bp);
7911 bnx2x_release_nvram_lock(bp);
7912
7913 return rc;
7914}
7915
7916static int bnx2x_get_eeprom(struct net_device *dev,
7917 struct ethtool_eeprom *eeprom, u8 *eebuf)
7918{
7919 struct bnx2x *bp = netdev_priv(dev);
7920 int rc;
7921
34f80b04 7922 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
7923 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7924 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7925 eeprom->len, eeprom->len);
7926
7927 /* parameters already validated in ethtool_get_eeprom */
7928
7929 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7930
7931 return rc;
7932}
7933
7934static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7935 u32 cmd_flags)
7936{
f1410647 7937 int count, i, rc;
a2fbb9ea
ET
7938
7939 /* build the command word */
7940 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7941
7942 /* need to clear DONE bit separately */
7943 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7944
7945 /* write the data */
7946 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7947
7948 /* address of the NVRAM to write to */
7949 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7950 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7951
7952 /* issue the write command */
7953 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7954
7955 /* adjust timeout for emulation/FPGA */
7956 count = NVRAM_TIMEOUT_COUNT;
7957 if (CHIP_REV_IS_SLOW(bp))
7958 count *= 100;
7959
7960 /* wait for completion */
7961 rc = -EBUSY;
7962 for (i = 0; i < count; i++) {
7963 udelay(5);
7964 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7965 if (val & MCPR_NVM_COMMAND_DONE) {
7966 rc = 0;
7967 break;
7968 }
7969 }
7970
7971 return rc;
7972}
7973
f1410647 7974#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
7975
7976static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7977 int buf_size)
7978{
7979 int rc;
7980 u32 cmd_flags;
7981 u32 align_offset;
7982 u32 val;
7983
34f80b04
EG
7984 if (offset + buf_size > bp->common.flash_size) {
7985 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7986 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7987 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
7988 return -EINVAL;
7989 }
7990
7991 /* request access to nvram interface */
7992 rc = bnx2x_acquire_nvram_lock(bp);
7993 if (rc)
7994 return rc;
7995
7996 /* enable access to nvram interface */
7997 bnx2x_enable_nvram_access(bp);
7998
7999 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8000 align_offset = (offset & ~0x03);
8001 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8002
8003 if (rc == 0) {
8004 val &= ~(0xff << BYTE_OFFSET(offset));
8005 val |= (*data_buf << BYTE_OFFSET(offset));
8006
8007 /* nvram data is returned as an array of bytes
8008 * convert it back to cpu order */
8009 val = be32_to_cpu(val);
8010
a2fbb9ea
ET
8011 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8012 cmd_flags);
8013 }
8014
8015 /* disable access to nvram interface */
8016 bnx2x_disable_nvram_access(bp);
8017 bnx2x_release_nvram_lock(bp);
8018
8019 return rc;
8020}
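bnx2x_nvram_write1() changes a single byte by reading the aligned dword, masking in the new byte with BYTE_OFFSET() and writing the dword back. A host-side sketch of just that masking step, with the flash access and the be32/cpu conversion left out; the helper name and test values are illustrative only:

#include <stdio.h>

#define BYTE_OFFSET(offset)	(8 * ((offset) & 0x03))

/* Clear the addressed byte in the aligned dword and splice in the new one. */
static unsigned int patch_byte(unsigned int dword, unsigned int offset,
			       unsigned char data)
{
	dword &= ~(0xffu << BYTE_OFFSET(offset));
	dword |= (unsigned int)data << BYTE_OFFSET(offset);
	return dword;
}

int main(void)
{
	/* hypothetical dword, single-byte update at byte offset 2 */
	printf("0x%08x\n", patch_byte(0x11223344, 2, 0xab));	/* -> 0x11ab3344 */
	return 0;
}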
8021
8022static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8023 int buf_size)
8024{
8025 int rc;
8026 u32 cmd_flags;
8027 u32 val;
8028 u32 written_so_far;
8029
34f80b04 8030 if (buf_size == 1) /* ethtool */
a2fbb9ea 8031 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8032
8033 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8034 DP(BNX2X_MSG_NVM,
c14423fe 8035 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8036 offset, buf_size);
8037 return -EINVAL;
8038 }
8039
34f80b04
EG
8040 if (offset + buf_size > bp->common.flash_size) {
8041 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8042 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8043 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8044 return -EINVAL;
8045 }
8046
8047 /* request access to nvram interface */
8048 rc = bnx2x_acquire_nvram_lock(bp);
8049 if (rc)
8050 return rc;
8051
8052 /* enable access to nvram interface */
8053 bnx2x_enable_nvram_access(bp);
8054
8055 written_so_far = 0;
8056 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8057 while ((written_so_far < buf_size) && (rc == 0)) {
8058 if (written_so_far == (buf_size - sizeof(u32)))
8059 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8060 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8061 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8062 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8063 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8064
8065 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8066
8067 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8068
8069 /* advance to the next dword */
8070 offset += sizeof(u32);
8071 data_buf += sizeof(u32);
8072 written_so_far += sizeof(u32);
8073 cmd_flags = 0;
8074 }
8075
8076 /* disable access to nvram interface */
8077 bnx2x_disable_nvram_access(bp);
8078 bnx2x_release_nvram_lock(bp);
8079
8080 return rc;
8081}
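The write loop above frames each dword with FIRST/LAST command flags: LAST on the final dword of the buffer or of a flash page, FIRST on the first dword of a new page. A small sketch that replays only that flag computation; the 256-byte page size is an assumption for illustration, since NVRAM_PAGE_SIZE is defined elsewhere in the driver headers:

#include <stdio.h>

#define PAGE_SIZE_BYTES	256		/* assumed page size, illustration only */

enum { FLAG_FIRST = 1, FLAG_LAST = 2 };

int main(void)
{
	unsigned int offset = 0xf8;	/* hypothetical dword-aligned start */
	unsigned int buf_size = 0x20;	/* 8 dwords spanning a page boundary */
	unsigned int written = 0;
	int flags = FLAG_FIRST;		/* first dword always opens a burst */

	while (written < buf_size) {
		if (written == buf_size - 4)
			flags |= FLAG_LAST;		/* last dword of the buffer */
		else if (((offset + 4) % PAGE_SIZE_BYTES) == 0)
			flags |= FLAG_LAST;		/* last dword of a page */
		else if ((offset % PAGE_SIZE_BYTES) == 0)
			flags |= FLAG_FIRST;		/* first dword of a new page */

		printf("offset 0x%03x flags:%s%s\n", offset,
		       (flags & FLAG_FIRST) ? " FIRST" : "",
		       (flags & FLAG_LAST) ? " LAST" : "");

		offset += 4;
		written += 4;
		flags = 0;
	}
	return 0;
}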
8082
8083static int bnx2x_set_eeprom(struct net_device *dev,
8084 struct ethtool_eeprom *eeprom, u8 *eebuf)
8085{
8086 struct bnx2x *bp = netdev_priv(dev);
8087 int rc;
8088
34f80b04 8089 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8090 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8091 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8092 eeprom->len, eeprom->len);
8093
8094 /* parameters already validated in ethtool_set_eeprom */
8095
c18487ee 8096 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8097 if (eeprom->magic == 0x00504859)
8098 if (bp->port.pmf) {
8099
8100 bnx2x_phy_hw_lock(bp);
8101 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8102 bp->link_params.ext_phy_config,
8103 (bp->state != BNX2X_STATE_CLOSED),
8104 eebuf, eeprom->len);
bb2a0f7a
YG
8105 if ((bp->state == BNX2X_STATE_OPEN) ||
8106 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8107 rc |= bnx2x_link_reset(&bp->link_params,
8108 &bp->link_vars);
8109 rc |= bnx2x_phy_init(&bp->link_params,
8110 &bp->link_vars);
bb2a0f7a 8111 }
34f80b04
EG
8112 bnx2x_phy_hw_unlock(bp);
8113
8114 } else /* Only the PMF can access the PHY */
8115 return -EINVAL;
8116 else
c18487ee 8117 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8118
8119 return rc;
8120}
8121
8122static int bnx2x_get_coalesce(struct net_device *dev,
8123 struct ethtool_coalesce *coal)
8124{
8125 struct bnx2x *bp = netdev_priv(dev);
8126
8127 memset(coal, 0, sizeof(struct ethtool_coalesce));
8128
8129 coal->rx_coalesce_usecs = bp->rx_ticks;
8130 coal->tx_coalesce_usecs = bp->tx_ticks;
8131 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8132
8133 return 0;
8134}
8135
8136static int bnx2x_set_coalesce(struct net_device *dev,
8137 struct ethtool_coalesce *coal)
8138{
8139 struct bnx2x *bp = netdev_priv(dev);
8140
8141 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8142 if (bp->rx_ticks > 3000)
8143 bp->rx_ticks = 3000;
8144
8145 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8146 if (bp->tx_ticks > 0x3000)
8147 bp->tx_ticks = 0x3000;
8148
8149 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8150 if (bp->stats_ticks > 0xffff00)
8151 bp->stats_ticks = 0xffff00;
8152 bp->stats_ticks &= 0xffff00;
8153
34f80b04 8154 if (netif_running(dev))
a2fbb9ea
ET
8155 bnx2x_update_coalesce(bp);
8156
8157 return 0;
8158}
8159
7a9b2557
VZ
8160static int bnx2x_set_flags(struct net_device *dev, u32 data)
8161{
8162 struct bnx2x *bp = netdev_priv(dev);
8163 int changed = 0;
8164 int rc = 0;
8165
8166 if (data & ETH_FLAG_LRO) {
8167 if (!(dev->features & NETIF_F_LRO)) {
8168 dev->features |= NETIF_F_LRO;
8169 bp->flags |= TPA_ENABLE_FLAG;
8170 changed = 1;
8171 }
8172
8173 } else if (dev->features & NETIF_F_LRO) {
8174 dev->features &= ~NETIF_F_LRO;
8175 bp->flags &= ~TPA_ENABLE_FLAG;
8176 changed = 1;
8177 }
8178
8179 if (changed && netif_running(dev)) {
8180 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8181 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8182 }
8183
8184 return rc;
8185}
8186
a2fbb9ea
ET
8187static void bnx2x_get_ringparam(struct net_device *dev,
8188 struct ethtool_ringparam *ering)
8189{
8190 struct bnx2x *bp = netdev_priv(dev);
8191
8192 ering->rx_max_pending = MAX_RX_AVAIL;
8193 ering->rx_mini_max_pending = 0;
8194 ering->rx_jumbo_max_pending = 0;
8195
8196 ering->rx_pending = bp->rx_ring_size;
8197 ering->rx_mini_pending = 0;
8198 ering->rx_jumbo_pending = 0;
8199
8200 ering->tx_max_pending = MAX_TX_AVAIL;
8201 ering->tx_pending = bp->tx_ring_size;
8202}
8203
8204static int bnx2x_set_ringparam(struct net_device *dev,
8205 struct ethtool_ringparam *ering)
8206{
8207 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8208 int rc = 0;
a2fbb9ea
ET
8209
8210 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8211 (ering->tx_pending > MAX_TX_AVAIL) ||
8212 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8213 return -EINVAL;
8214
8215 bp->rx_ring_size = ering->rx_pending;
8216 bp->tx_ring_size = ering->tx_pending;
8217
34f80b04
EG
8218 if (netif_running(dev)) {
8219 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8220 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8221 }
8222
34f80b04 8223 return rc;
a2fbb9ea
ET
8224}
8225
8226static void bnx2x_get_pauseparam(struct net_device *dev,
8227 struct ethtool_pauseparam *epause)
8228{
8229 struct bnx2x *bp = netdev_priv(dev);
8230
c18487ee
YR
8231 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8232 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8233
8234 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8235 FLOW_CTRL_RX);
8236 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8237 FLOW_CTRL_TX);
a2fbb9ea
ET
8238
8239 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8240 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8241 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8242}
8243
8244static int bnx2x_set_pauseparam(struct net_device *dev,
8245 struct ethtool_pauseparam *epause)
8246{
8247 struct bnx2x *bp = netdev_priv(dev);
8248
34f80b04
EG
8249 if (IS_E1HMF(bp))
8250 return 0;
8251
a2fbb9ea
ET
8252 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8253 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8254 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8255
c18487ee 8256 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8257
f1410647 8258 if (epause->rx_pause)
c18487ee
YR
8259 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8260
f1410647 8261 if (epause->tx_pause)
c18487ee
YR
8262 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8263
8264 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8265 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 8266
c18487ee 8267 if (epause->autoneg) {
34f80b04 8268 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
c18487ee
YR
8269 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8270 return -EINVAL;
8271 }
a2fbb9ea 8272
c18487ee
YR
8273 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8274 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8275 }
a2fbb9ea 8276
c18487ee
YR
8277 DP(NETIF_MSG_LINK,
8278 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8279
8280 if (netif_running(dev)) {
bb2a0f7a 8281 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8282 bnx2x_link_set(bp);
8283 }
a2fbb9ea
ET
8284
8285 return 0;
8286}
8287
8288static u32 bnx2x_get_rx_csum(struct net_device *dev)
8289{
8290 struct bnx2x *bp = netdev_priv(dev);
8291
8292 return bp->rx_csum;
8293}
8294
8295static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8296{
8297 struct bnx2x *bp = netdev_priv(dev);
8298
8299 bp->rx_csum = data;
8300 return 0;
8301}
8302
8303static int bnx2x_set_tso(struct net_device *dev, u32 data)
8304{
755735eb 8305 if (data) {
a2fbb9ea 8306 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8307 dev->features |= NETIF_F_TSO6;
8308 } else {
a2fbb9ea 8309 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8310 dev->features &= ~NETIF_F_TSO6;
8311 }
8312
a2fbb9ea
ET
8313 return 0;
8314}
8315
f3c87cdd 8316static const struct {
a2fbb9ea
ET
8317 char string[ETH_GSTRING_LEN];
8318} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8319 { "register_test (offline)" },
8320 { "memory_test (offline)" },
8321 { "loopback_test (offline)" },
8322 { "nvram_test (online)" },
8323 { "interrupt_test (online)" },
8324 { "link_test (online)" },
8325 { "idle check (online)" },
8326 { "MC errors (online)" }
a2fbb9ea
ET
8327};
8328
8329static int bnx2x_self_test_count(struct net_device *dev)
8330{
8331 return BNX2X_NUM_TESTS;
8332}
8333
f3c87cdd
YG
8334static int bnx2x_test_registers(struct bnx2x *bp)
8335{
8336 int idx, i, rc = -ENODEV;
8337 u32 wr_val = 0;
8338 static const struct {
8339 u32 offset0;
8340 u32 offset1;
8341 u32 mask;
8342 } reg_tbl[] = {
8343/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8344 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8345 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8346 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8347 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8348 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8349 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8350 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8351 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8352 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8353/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8354 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8355 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8356 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8357 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8358 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8359 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8360 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8361 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8362 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8363/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8364 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8365 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8366 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8367 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8368 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8369 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8370 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8371 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8372 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8373/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8374 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8375 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8376 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8377 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8378 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8379 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8380 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8381
8382 { 0xffffffff, 0, 0x00000000 }
8383 };
8384
8385 if (!netif_running(bp->dev))
8386 return rc;
8387
8388 /* Repeat the test twice:
8389 First by writing 0x00000000, second by writing 0xffffffff */
8390 for (idx = 0; idx < 2; idx++) {
8391
8392 switch (idx) {
8393 case 0:
8394 wr_val = 0;
8395 break;
8396 case 1:
8397 wr_val = 0xffffffff;
8398 break;
8399 }
8400
8401 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8402 u32 offset, mask, save_val, val;
8403 int port = BP_PORT(bp);
8404
8405 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8406 mask = reg_tbl[i].mask;
8407
8408 save_val = REG_RD(bp, offset);
8409
8410 REG_WR(bp, offset, wr_val);
8411 val = REG_RD(bp, offset);
8412
8413 /* Restore the original register's value */
8414 REG_WR(bp, offset, save_val);
8415
 8416 /* verify value is as expected */
8417 if ((val & mask) != (wr_val & mask))
8418 goto test_reg_exit;
8419 }
8420 }
8421
8422 rc = 0;
8423
8424test_reg_exit:
8425 return rc;
8426}
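bnx2x_test_registers() exercises each register by saving its value, writing an all-zeros and then an all-ones pattern, reading back under the mask of writable bits, and restoring the original contents. A toy sketch of that save/write/verify/restore walk against a fake register file; nothing here touches real hardware and all names are invented:

#include <stdio.h>

/* Toy register file standing in for REG_RD/REG_WR. */
static unsigned int regs[4];

static unsigned int reg_rd(unsigned int off)		{ return regs[off / 4]; }
static void reg_wr(unsigned int off, unsigned int v)	{ regs[off / 4] = v; }

static int walk_one(unsigned int off, unsigned int mask)
{
	static const unsigned int patterns[] = { 0x00000000, 0xffffffff };
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int save = reg_rd(off);
		unsigned int val;

		reg_wr(off, patterns[i]);
		val = reg_rd(off);
		reg_wr(off, save);			/* always restore */

		if ((val & mask) != (patterns[i] & mask))
			return -1;			/* stuck or read-only bits */
	}
	return 0;
}

int main(void)
{
	printf("walk: %s\n", walk_one(0, 0x000003ff) ? "FAILED" : "ok");
	return 0;
}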
8427
8428static int bnx2x_test_memory(struct bnx2x *bp)
8429{
8430 int i, j, rc = -ENODEV;
8431 u32 val;
8432 static const struct {
8433 u32 offset;
8434 int size;
8435 } mem_tbl[] = {
8436 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8437 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8438 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8439 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8440 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8441 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8442 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8443
8444 { 0xffffffff, 0 }
8445 };
8446 static const struct {
8447 char *name;
8448 u32 offset;
8449 u32 mask;
8450 } prty_tbl[] = {
8451 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 },
8452 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 },
8453 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 },
8454 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 },
8455 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 },
8456 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 },
8457
8458 { NULL, 0xffffffff, 0 }
8459 };
8460
8461 if (!netif_running(bp->dev))
8462 return rc;
8463
8464 /* Go through all the memories */
8465 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8466 for (j = 0; j < mem_tbl[i].size; j++)
8467 REG_RD(bp, mem_tbl[i].offset + j*4);
8468
8469 /* Check the parity status */
8470 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8471 val = REG_RD(bp, prty_tbl[i].offset);
8472 if (val & ~(prty_tbl[i].mask)) {
8473 DP(NETIF_MSG_HW,
8474 "%s is 0x%x\n", prty_tbl[i].name, val);
8475 goto test_mem_exit;
8476 }
8477 }
8478
8479 rc = 0;
8480
8481test_mem_exit:
8482 return rc;
8483}
8484
8485static void bnx2x_netif_start(struct bnx2x *bp)
8486{
8487 int i;
8488
8489 if (atomic_dec_and_test(&bp->intr_sem)) {
8490 if (netif_running(bp->dev)) {
8491 bnx2x_int_enable(bp);
8492 for_each_queue(bp, i)
8493 napi_enable(&bnx2x_fp(bp, i, napi));
8494 if (bp->state == BNX2X_STATE_OPEN)
8495 netif_wake_queue(bp->dev);
8496 }
8497 }
8498}
8499
8500static void bnx2x_netif_stop(struct bnx2x *bp)
8501{
8502 int i;
8503
8504 if (netif_running(bp->dev)) {
8505 netif_tx_disable(bp->dev);
8506 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8507 for_each_queue(bp, i)
8508 napi_disable(&bnx2x_fp(bp, i, napi));
8509 }
8510 bnx2x_int_disable_sync(bp);
8511}
8512
8513static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8514{
8515 int cnt = 1000;
8516
8517 if (link_up)
8518 while (bnx2x_link_test(bp) && cnt--)
8519 msleep(10);
8520}
8521
8522static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8523{
8524 unsigned int pkt_size, num_pkts, i;
8525 struct sk_buff *skb;
8526 unsigned char *packet;
8527 struct bnx2x_fastpath *fp = &bp->fp[0];
8528 u16 tx_start_idx, tx_idx;
8529 u16 rx_start_idx, rx_idx;
8530 u16 pkt_prod;
8531 struct sw_tx_bd *tx_buf;
8532 struct eth_tx_bd *tx_bd;
8533 dma_addr_t mapping;
8534 union eth_rx_cqe *cqe;
8535 u8 cqe_fp_flags;
8536 struct sw_rx_bd *rx_buf;
8537 u16 len;
8538 int rc = -ENODEV;
8539
8540 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8541 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8542 bnx2x_phy_hw_lock(bp);
8543 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8544 bnx2x_phy_hw_unlock(bp);
8545
8546 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8547 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8548 bnx2x_phy_hw_lock(bp);
8549 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8550 bnx2x_phy_hw_unlock(bp);
8551 /* wait until link state is restored */
8552 bnx2x_wait_for_link(bp, link_up);
8553
8554 } else
8555 return -EINVAL;
8556
8557 pkt_size = 1514;
8558 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8559 if (!skb) {
8560 rc = -ENOMEM;
8561 goto test_loopback_exit;
8562 }
8563 packet = skb_put(skb, pkt_size);
8564 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8565 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8566 for (i = ETH_HLEN; i < pkt_size; i++)
8567 packet[i] = (unsigned char) (i & 0xff);
8568
8569 num_pkts = 0;
8570 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8571 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8572
8573 pkt_prod = fp->tx_pkt_prod++;
8574 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8575 tx_buf->first_bd = fp->tx_bd_prod;
8576 tx_buf->skb = skb;
8577
8578 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8579 mapping = pci_map_single(bp->pdev, skb->data,
8580 skb_headlen(skb), PCI_DMA_TODEVICE);
8581 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8582 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8583 tx_bd->nbd = cpu_to_le16(1);
8584 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8585 tx_bd->vlan = cpu_to_le16(pkt_prod);
8586 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8587 ETH_TX_BD_FLAGS_END_BD);
8588 tx_bd->general_data = ((UNICAST_ADDRESS <<
8589 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8590
8591 fp->hw_tx_prods->bds_prod =
8592 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8593 mb(); /* FW restriction: must not reorder writing nbd and packets */
8594 fp->hw_tx_prods->packets_prod =
8595 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8596 DOORBELL(bp, FP_IDX(fp), 0);
8597
8598 mmiowb();
8599
8600 num_pkts++;
8601 fp->tx_bd_prod++;
8602 bp->dev->trans_start = jiffies;
8603
8604 udelay(100);
8605
8606 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8607 if (tx_idx != tx_start_idx + num_pkts)
8608 goto test_loopback_exit;
8609
8610 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8611 if (rx_idx != rx_start_idx + num_pkts)
8612 goto test_loopback_exit;
8613
8614 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8615 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8616 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8617 goto test_loopback_rx_exit;
8618
8619 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8620 if (len != pkt_size)
8621 goto test_loopback_rx_exit;
8622
8623 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8624 skb = rx_buf->skb;
8625 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8626 for (i = ETH_HLEN; i < pkt_size; i++)
8627 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8628 goto test_loopback_rx_exit;
8629
8630 rc = 0;
8631
8632test_loopback_rx_exit:
8633 bp->dev->last_rx = jiffies;
8634
8635 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8636 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8637 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8638 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8639
8640 /* Update producers */
8641 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8642 fp->rx_sge_prod);
8643 mmiowb(); /* keep prod updates ordered */
8644
8645test_loopback_exit:
8646 bp->link_params.loopback_mode = LOOPBACK_NONE;
8647
8648 return rc;
8649}
8650
8651static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8652{
8653 int rc = 0;
8654
8655 if (!netif_running(bp->dev))
8656 return BNX2X_LOOPBACK_FAILED;
8657
8658 bnx2x_netif_stop(bp);
8659
8660 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8661 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8662 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8663 }
8664
8665 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8666 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8667 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8668 }
8669
8670 bnx2x_netif_start(bp);
8671
8672 return rc;
8673}
8674
8675#define CRC32_RESIDUAL 0xdebb20e3
8676
8677static int bnx2x_test_nvram(struct bnx2x *bp)
8678{
8679 static const struct {
8680 int offset;
8681 int size;
8682 } nvram_tbl[] = {
8683 { 0, 0x14 }, /* bootstrap */
8684 { 0x14, 0xec }, /* dir */
8685 { 0x100, 0x350 }, /* manuf_info */
8686 { 0x450, 0xf0 }, /* feature_info */
8687 { 0x640, 0x64 }, /* upgrade_key_info */
8688 { 0x6a4, 0x64 },
8689 { 0x708, 0x70 }, /* manuf_key_info */
8690 { 0x778, 0x70 },
8691 { 0, 0 }
8692 };
8693 u32 buf[0x350 / 4];
8694 u8 *data = (u8 *)buf;
8695 int i, rc;
8696 u32 magic, csum;
8697
8698 rc = bnx2x_nvram_read(bp, 0, data, 4);
8699 if (rc) {
8700 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8701 goto test_nvram_exit;
8702 }
8703
8704 magic = be32_to_cpu(buf[0]);
8705 if (magic != 0x669955aa) {
8706 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8707 rc = -ENODEV;
8708 goto test_nvram_exit;
8709 }
8710
8711 for (i = 0; nvram_tbl[i].size; i++) {
8712
8713 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8714 nvram_tbl[i].size);
8715 if (rc) {
8716 DP(NETIF_MSG_PROBE,
8717 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8718 goto test_nvram_exit;
8719 }
8720
8721 csum = ether_crc_le(nvram_tbl[i].size, data);
8722 if (csum != CRC32_RESIDUAL) {
8723 DP(NETIF_MSG_PROBE,
8724 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8725 rc = -ENODEV;
8726 goto test_nvram_exit;
8727 }
8728 }
8729
8730test_nvram_exit:
8731 return rc;
8732}
8733
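/* Interrupt test: post a harmless SET_MAC ramrod with an empty (length 0)
 * CAM update and wait up to ~100ms for the slowpath completion to clear
 * set_mac_pending; a timeout means the slowpath interrupt path is broken.
 */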
8734static int bnx2x_test_intr(struct bnx2x *bp)
8735{
8736 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8737 int i, rc;
8738
8739 if (!netif_running(bp->dev))
8740 return -ENODEV;
8741
8742 config->hdr.length_6b = 0;
8743 config->hdr.offset = 0;
8744 config->hdr.client_id = BP_CL_ID(bp);
8745 config->hdr.reserved1 = 0;
8746
8747 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8748 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8749 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8750 if (rc == 0) {
8751 bp->set_mac_pending++;
8752 for (i = 0; i < 10; i++) {
8753 if (!bp->set_mac_pending)
8754 break;
8755 msleep_interruptible(10);
8756 }
8757 if (i == 10)
8758 rc = -ENODEV;
8759 }
8760
8761 return rc;
8762}
8763
a2fbb9ea
ET
8764static void bnx2x_self_test(struct net_device *dev,
8765 struct ethtool_test *etest, u64 *buf)
8766{
8767 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
8768
8769 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8770
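	/* buf[] holds one result slot per self-test; a non-zero value marks
	 * that test as failed */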
f3c87cdd 8771 if (!netif_running(dev))
a2fbb9ea 8772 return;
a2fbb9ea 8773
f3c87cdd
YG
 8774	/* offline tests are not supported in MF mode */
8775 if (IS_E1HMF(bp))
8776 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8777
8778 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8779 u8 link_up;
8780
8781 link_up = bp->link_vars.link_up;
8782 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8783 bnx2x_nic_load(bp, LOAD_DIAG);
8784 /* wait until link state is restored */
8785 bnx2x_wait_for_link(bp, link_up);
8786
8787 if (bnx2x_test_registers(bp) != 0) {
8788 buf[0] = 1;
8789 etest->flags |= ETH_TEST_FL_FAILED;
8790 }
8791 if (bnx2x_test_memory(bp) != 0) {
8792 buf[1] = 1;
8793 etest->flags |= ETH_TEST_FL_FAILED;
8794 }
8795 buf[2] = bnx2x_test_loopback(bp, link_up);
8796 if (buf[2] != 0)
8797 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8798
f3c87cdd
YG
8799 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8800 bnx2x_nic_load(bp, LOAD_NORMAL);
8801 /* wait until link state is restored */
8802 bnx2x_wait_for_link(bp, link_up);
8803 }
8804 if (bnx2x_test_nvram(bp) != 0) {
8805 buf[3] = 1;
a2fbb9ea
ET
8806 etest->flags |= ETH_TEST_FL_FAILED;
8807 }
f3c87cdd
YG
8808 if (bnx2x_test_intr(bp) != 0) {
8809 buf[4] = 1;
8810 etest->flags |= ETH_TEST_FL_FAILED;
8811 }
8812 if (bp->port.pmf)
8813 if (bnx2x_link_test(bp) != 0) {
8814 buf[5] = 1;
8815 etest->flags |= ETH_TEST_FL_FAILED;
8816 }
8817 buf[7] = bnx2x_mc_assert(bp);
8818 if (buf[7] != 0)
8819 etest->flags |= ETH_TEST_FL_FAILED;
8820
8821#ifdef BNX2X_EXTRA_DEBUG
8822 bnx2x_panic_dump(bp);
8823#endif
a2fbb9ea
ET
8824}
8825
bb2a0f7a
YG
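/* offset is in 32-bit words within bnx2x_eth_stats, size is the counter
 * width in bytes (8-byte counters are stored as hi/lo words and folded when
 * read out), and flags marks the stats that remain meaningful per function
 * in E1H multi-function mode (flags == 0 entries are skipped there).
 */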
8826static const struct {
8827 long offset;
8828 int size;
8829 u32 flags;
a2fbb9ea 8830 char string[ETH_GSTRING_LEN];
bb2a0f7a
YG
8831} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8832/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
8833 { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
8834 { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
8835 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
8836 { STATS_OFFSET32(total_unicast_packets_received_hi),
8837 8, 1, "rx_ucast_packets" },
8838 { STATS_OFFSET32(total_multicast_packets_received_hi),
8839 8, 1, "rx_mcast_packets" },
8840 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8841 8, 1, "rx_bcast_packets" },
8842 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8843 8, 1, "tx_packets" },
8844 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8845 8, 0, "tx_mac_errors" },
8846/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8847 8, 0, "tx_carrier_errors" },
8848 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8849 8, 0, "rx_crc_errors" },
8850 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8851 8, 0, "rx_align_errors" },
8852 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8853 8, 0, "tx_single_collisions" },
8854 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8855 8, 0, "tx_multi_collisions" },
8856 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8857 8, 0, "tx_deferred" },
8858 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8859 8, 0, "tx_excess_collisions" },
8860 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8861 8, 0, "tx_late_collisions" },
8862 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8863 8, 0, "tx_total_collisions" },
8864 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8865 8, 0, "rx_fragments" },
8866/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
8867 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8868 8, 0, "rx_undersize_packets" },
8869 { STATS_OFFSET32(jabber_packets_received),
8870 4, 1, "rx_oversize_packets" },
8871 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8872 8, 0, "tx_64_byte_packets" },
8873 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8874 8, 0, "tx_65_to_127_byte_packets" },
8875 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8876 8, 0, "tx_128_to_255_byte_packets" },
8877 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8878 8, 0, "tx_256_to_511_byte_packets" },
8879 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8880 8, 0, "tx_512_to_1023_byte_packets" },
8881 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8882 8, 0, "tx_1024_to_1522_byte_packets" },
8883 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8884 8, 0, "tx_1523_to_9022_byte_packets" },
8885/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8886 8, 0, "rx_xon_frames" },
8887 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8888 8, 0, "rx_xoff_frames" },
8889 { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
8890 { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
8891 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8892 8, 0, "rx_mac_ctrl_frames" },
8893 { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
8894 { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
8895 { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
8896 { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
8897/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
a2fbb9ea
ET
8898};
8899
8900static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8901{
bb2a0f7a
YG
8902 struct bnx2x *bp = netdev_priv(dev);
8903 int i, j;
8904
a2fbb9ea
ET
8905 switch (stringset) {
8906 case ETH_SS_STATS:
bb2a0f7a
YG
8907 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8908 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
8909 continue;
8910 strcpy(buf + j*ETH_GSTRING_LEN,
8911 bnx2x_stats_arr[i].string);
8912 j++;
8913 }
a2fbb9ea
ET
8914 break;
8915
8916 case ETH_SS_TEST:
8917 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8918 break;
8919 }
8920}
8921
8922static int bnx2x_get_stats_count(struct net_device *dev)
8923{
bb2a0f7a
YG
8924 struct bnx2x *bp = netdev_priv(dev);
8925 int i, num_stats = 0;
8926
8927 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8928 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
8929 continue;
8930 num_stats++;
8931 }
8932 return num_stats;
a2fbb9ea
ET
8933}
8934
8935static void bnx2x_get_ethtool_stats(struct net_device *dev,
8936 struct ethtool_stats *stats, u64 *buf)
8937{
8938 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
8939 u32 *hw_stats = (u32 *)&bp->eth_stats;
8940 int i, j;
a2fbb9ea 8941
bb2a0f7a
YG
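	/* walk the stats table: 4-byte counters are copied directly, 8-byte
	 * counters are rebuilt from their hi/lo words with HILO_U64() */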
8942 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8943 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
a2fbb9ea 8944 continue;
bb2a0f7a
YG
8945
8946 if (bnx2x_stats_arr[i].size == 0) {
8947 /* skip this counter */
8948 buf[j] = 0;
8949 j++;
a2fbb9ea
ET
8950 continue;
8951 }
bb2a0f7a 8952 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 8953 /* 4-byte counter */
bb2a0f7a
YG
8954 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
8955 j++;
a2fbb9ea
ET
8956 continue;
8957 }
8958 /* 8-byte counter */
bb2a0f7a
YG
8959 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
8960 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
8961 j++;
a2fbb9ea
ET
8962 }
8963}
8964
8965static int bnx2x_phys_id(struct net_device *dev, u32 data)
8966{
8967 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8968 int port = BP_PORT(bp);
a2fbb9ea
ET
8969 int i;
8970
34f80b04
EG
8971 if (!netif_running(dev))
8972 return 0;
8973
8974 if (!bp->port.pmf)
8975 return 0;
8976
a2fbb9ea
ET
8977 if (data == 0)
8978 data = 2;
8979
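	/* blink for "data" seconds: each iteration holds the LED on or off
	 * for 500ms, alternating between operational and off modes */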
8980 for (i = 0; i < (data * 2); i++) {
c18487ee 8981 if ((i % 2) == 0)
34f80b04 8982 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
8983 bp->link_params.hw_led_mode,
8984 bp->link_params.chip_id);
8985 else
34f80b04 8986 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
8987 bp->link_params.hw_led_mode,
8988 bp->link_params.chip_id);
8989
a2fbb9ea
ET
8990 msleep_interruptible(500);
8991 if (signal_pending(current))
8992 break;
8993 }
8994
c18487ee 8995 if (bp->link_vars.link_up)
34f80b04 8996 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
8997 bp->link_vars.line_speed,
8998 bp->link_params.hw_led_mode,
8999 bp->link_params.chip_id);
a2fbb9ea
ET
9000
9001 return 0;
9002}
9003
9004static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9005 .get_settings = bnx2x_get_settings,
9006 .set_settings = bnx2x_set_settings,
9007 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9008 .get_wol = bnx2x_get_wol,
9009 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9010 .get_msglevel = bnx2x_get_msglevel,
9011 .set_msglevel = bnx2x_set_msglevel,
9012 .nway_reset = bnx2x_nway_reset,
9013 .get_link = ethtool_op_get_link,
9014 .get_eeprom_len = bnx2x_get_eeprom_len,
9015 .get_eeprom = bnx2x_get_eeprom,
9016 .set_eeprom = bnx2x_set_eeprom,
9017 .get_coalesce = bnx2x_get_coalesce,
9018 .set_coalesce = bnx2x_set_coalesce,
9019 .get_ringparam = bnx2x_get_ringparam,
9020 .set_ringparam = bnx2x_set_ringparam,
9021 .get_pauseparam = bnx2x_get_pauseparam,
9022 .set_pauseparam = bnx2x_set_pauseparam,
9023 .get_rx_csum = bnx2x_get_rx_csum,
9024 .set_rx_csum = bnx2x_set_rx_csum,
9025 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9026 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9027 .set_flags = bnx2x_set_flags,
9028 .get_flags = ethtool_op_get_flags,
9029 .get_sg = ethtool_op_get_sg,
9030 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9031 .get_tso = ethtool_op_get_tso,
9032 .set_tso = bnx2x_set_tso,
9033 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9034 .self_test = bnx2x_self_test,
9035 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9036 .phys_id = bnx2x_phys_id,
9037 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9038 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9039};
9040
9041/* end of ethtool_ops */
9042
9043/****************************************************************************
9044* General service functions
9045****************************************************************************/
9046
9047static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9048{
9049 u16 pmcsr;
9050
9051 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9052
9053 switch (state) {
9054 case PCI_D0:
34f80b04 9055 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9056 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9057 PCI_PM_CTRL_PME_STATUS));
9058
9059 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9060 /* delay required during transition out of D3hot */
9061 msleep(20);
34f80b04 9062 break;
a2fbb9ea 9063
34f80b04
EG
9064 case PCI_D3hot:
9065 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
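		/* state field value 3 selects D3hot in the PM control register */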
9066 pmcsr |= 3;
a2fbb9ea 9067
34f80b04
EG
9068 if (bp->wol)
9069 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9070
34f80b04
EG
9071 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9072 pmcsr);
a2fbb9ea 9073
34f80b04
EG
9074 /* No more memory access after this point until
9075 * device is brought back to D0.
9076 */
9077 break;
9078
9079 default:
9080 return -EINVAL;
9081 }
9082 return 0;
a2fbb9ea
ET
9083}
9084
34f80b04
EG
9085/*
9086 * net_device service functions
9087 */
9088
a2fbb9ea
ET
9089static int bnx2x_poll(struct napi_struct *napi, int budget)
9090{
9091 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9092 napi);
9093 struct bnx2x *bp = fp->bp;
9094 int work_done = 0;
9095
9096#ifdef BNX2X_STOP_ON_ERROR
9097 if (unlikely(bp->panic))
34f80b04 9098 goto poll_panic;
a2fbb9ea
ET
9099#endif
9100
9101 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9102 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9103 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9104
9105 bnx2x_update_fpsb_idx(fp);
9106
34f80b04
EG
9107 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
9108 (fp->tx_pkt_prod != fp->tx_pkt_cons))
a2fbb9ea
ET
9109 bnx2x_tx_int(fp, budget);
9110
a2fbb9ea
ET
9111 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9112 work_done = bnx2x_rx_int(fp, budget);
9113
a2fbb9ea
ET
9114 rmb(); /* bnx2x_has_work() reads the status block */
9115
9116 /* must not complete if we consumed full budget */
9117 if ((work_done < budget) && !bnx2x_has_work(fp)) {
9118
9119#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9120poll_panic:
a2fbb9ea
ET
9121#endif
9122 netif_rx_complete(bp->dev, napi);
9123
34f80b04 9124 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9125 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9126 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9127 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9128 }
a2fbb9ea
ET
9129 return work_done;
9130}
9131
755735eb
EG
9132
9133/* we split the first BD into headers and data BDs
 9134 * to ease the pain of our fellow microcode engineers
9135 * we use one mapping for both BDs
9136 * So far this has only been observed to happen
9137 * in Other Operating Systems(TM)
9138 */
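/* The header BD keeps the original DMA mapping but is trimmed to hlen bytes;
 * the new data BD points hlen bytes into that same mapping and carries the
 * remaining old_len - hlen bytes, so no extra pci_map call is needed.
 */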
9139static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9140 struct bnx2x_fastpath *fp,
9141 struct eth_tx_bd **tx_bd, u16 hlen,
9142 u16 bd_prod, int nbd)
9143{
9144 struct eth_tx_bd *h_tx_bd = *tx_bd;
9145 struct eth_tx_bd *d_tx_bd;
9146 dma_addr_t mapping;
9147 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9148
9149 /* first fix first BD */
9150 h_tx_bd->nbd = cpu_to_le16(nbd);
9151 h_tx_bd->nbytes = cpu_to_le16(hlen);
9152
9153 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9154 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9155 h_tx_bd->addr_lo, h_tx_bd->nbd);
9156
9157 /* now get a new data BD
9158 * (after the pbd) and fill it */
9159 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9160 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9161
9162 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9163 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9164
9165 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9166 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9167 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9168 d_tx_bd->vlan = 0;
9169 /* this marks the BD as one that has no individual mapping
9170 * the FW ignores this flag in a BD not marked start
9171 */
9172 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9173 DP(NETIF_MSG_TX_QUEUED,
9174 "TSO split data size is %d (%x:%x)\n",
9175 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9176
9177 /* update tx_bd for marking the last BD flag */
9178 *tx_bd = d_tx_bd;
9179
9180 return bd_prod;
9181}
9182
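/* Adjust a checksum that was computed starting "fix" bytes away from the
 * transport header: fold the extra (or missing) bytes out of (or into) the
 * partial sum, then return it byte-swapped as the parsing BD expects.
 */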
9183static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9184{
9185 if (fix > 0)
9186 csum = (u16) ~csum_fold(csum_sub(csum,
9187 csum_partial(t_header - fix, fix, 0)));
9188
9189 else if (fix < 0)
9190 csum = (u16) ~csum_fold(csum_add(csum,
9191 csum_partial(t_header, -fix, 0)));
9192
9193 return swab16(csum);
9194}
9195
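/* Collapse the skb offload state into XMIT_* flags: plain, checksum family
 * (IPv4/IPv6, plus TCP) and GSO family, used below when building the BDs.
 */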
9196static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9197{
9198 u32 rc;
9199
9200 if (skb->ip_summed != CHECKSUM_PARTIAL)
9201 rc = XMIT_PLAIN;
9202
9203 else {
9204 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9205 rc = XMIT_CSUM_V6;
9206 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9207 rc |= XMIT_CSUM_TCP;
9208
9209 } else {
9210 rc = XMIT_CSUM_V4;
9211 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9212 rc |= XMIT_CSUM_TCP;
9213 }
9214 }
9215
9216 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9217 rc |= XMIT_GSO_V4;
9218
9219 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9220 rc |= XMIT_GSO_V6;
9221
9222 return rc;
9223}
9224
9225/* check if packet requires linearization (packet is too fragmented) */
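/* The FW can fetch only MAX_FETCH_BD BDs per transmitted frame. For LSO,
 * each window of (MAX_FETCH_BD - 3) consecutive data BDs must therefore hold
 * at least one full MSS of data; the sliding-window sums below check this,
 * and a failure (or a plain packet with too many frags) forces
 * skb_linearize() in the xmit path.
 */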
9226static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9227 u32 xmit_type)
9228{
9229 int to_copy = 0;
9230 int hlen = 0;
9231 int first_bd_sz = 0;
9232
9233 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9234 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9235
9236 if (xmit_type & XMIT_GSO) {
9237 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9238 /* Check if LSO packet needs to be copied:
9239 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9240 int wnd_size = MAX_FETCH_BD - 3;
 9241			/* Number of windows to check */
9242 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9243 int wnd_idx = 0;
9244 int frag_idx = 0;
9245 u32 wnd_sum = 0;
9246
9247 /* Headers length */
9248 hlen = (int)(skb_transport_header(skb) - skb->data) +
9249 tcp_hdrlen(skb);
9250
 9251			/* Amount of data (w/o headers) on the linear part of the SKB */
9252 first_bd_sz = skb_headlen(skb) - hlen;
9253
9254 wnd_sum = first_bd_sz;
9255
9256 /* Calculate the first sum - it's special */
9257 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9258 wnd_sum +=
9259 skb_shinfo(skb)->frags[frag_idx].size;
9260
9261 /* If there was data on linear skb data - check it */
9262 if (first_bd_sz > 0) {
9263 if (unlikely(wnd_sum < lso_mss)) {
9264 to_copy = 1;
9265 goto exit_lbl;
9266 }
9267
9268 wnd_sum -= first_bd_sz;
9269 }
9270
9271 /* Others are easier: run through the frag list and
9272 check all windows */
9273 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9274 wnd_sum +=
9275 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9276
9277 if (unlikely(wnd_sum < lso_mss)) {
9278 to_copy = 1;
9279 break;
9280 }
9281 wnd_sum -=
9282 skb_shinfo(skb)->frags[wnd_idx].size;
9283 }
9284
9285 } else {
 9286			/* a non-LSO packet that is too fragmented should always
 9287			   be linearized */
9288 to_copy = 1;
9289 }
9290 }
9291
9292exit_lbl:
9293 if (unlikely(to_copy))
9294 DP(NETIF_MSG_TX_QUEUED,
9295 "Linearization IS REQUIRED for %s packet. "
9296 "num_frags %d hlen %d first_bd_sz %d\n",
9297 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9298 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9299
9300 return to_copy;
9301}
9302
9303/* called with netif_tx_lock
a2fbb9ea 9304 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9305 * netif_wake_queue()
a2fbb9ea
ET
9306 */
9307static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9308{
9309 struct bnx2x *bp = netdev_priv(dev);
9310 struct bnx2x_fastpath *fp;
9311 struct sw_tx_bd *tx_buf;
9312 struct eth_tx_bd *tx_bd;
9313 struct eth_tx_parse_bd *pbd = NULL;
9314 u16 pkt_prod, bd_prod;
755735eb 9315 int nbd, fp_index;
a2fbb9ea 9316 dma_addr_t mapping;
755735eb
EG
9317 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9318 int vlan_off = (bp->e1hov ? 4 : 0);
9319 int i;
9320 u8 hlen = 0;
a2fbb9ea
ET
9321
9322#ifdef BNX2X_STOP_ON_ERROR
9323 if (unlikely(bp->panic))
9324 return NETDEV_TX_BUSY;
9325#endif
9326
755735eb 9327 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9328 fp = &bp->fp[fp_index];
755735eb 9329
a2fbb9ea
ET
9330 if (unlikely(bnx2x_tx_avail(bp->fp) <
9331 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9332		bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9333 netif_stop_queue(dev);
9334 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9335 return NETDEV_TX_BUSY;
9336 }
9337
755735eb
EG
9338 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9339 " gso type %x xmit_type %x\n",
9340 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9341 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9342
 9343	/* First, check if we need to linearize the skb
9344 (due to FW restrictions) */
9345 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9346 /* Statistics of linearization */
9347 bp->lin_cnt++;
9348 if (skb_linearize(skb) != 0) {
9349 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9350 "silently dropping this SKB\n");
9351 dev_kfree_skb_any(skb);
9352 return 0;
9353 }
9354 }
9355
a2fbb9ea 9356 /*
755735eb 9357 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9358 then for TSO or xsum we have a parsing info BD,
755735eb 9359 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9360 (don't forget to mark the last one as last,
9361 and to unmap only AFTER you write to the BD ...)
755735eb 9362 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9363 */
9364
9365 pkt_prod = fp->tx_pkt_prod++;
755735eb 9366 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9367
755735eb 9368 /* get a tx_buf and first BD */
a2fbb9ea
ET
9369 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9370 tx_bd = &fp->tx_desc_ring[bd_prod];
9371
9372 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9373 tx_bd->general_data = (UNICAST_ADDRESS <<
9374 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9375 tx_bd->general_data |= 1; /* header nbd */
9376
755735eb
EG
9377 /* remember the first BD of the packet */
9378 tx_buf->first_bd = fp->tx_bd_prod;
9379 tx_buf->skb = skb;
a2fbb9ea
ET
9380
9381 DP(NETIF_MSG_TX_QUEUED,
9382 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9383 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9384
755735eb
EG
9385 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9386 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9387 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9388 vlan_off += 4;
9389 } else
9390 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9391
755735eb 9392 if (xmit_type) {
a2fbb9ea 9393
755735eb 9394 /* turn on parsing and get a BD */
a2fbb9ea
ET
9395 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9396 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9397
9398 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9399 }
9400
9401 if (xmit_type & XMIT_CSUM) {
9402 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9403
9404 /* for now NS flag is not used in Linux */
755735eb 9405 pbd->global_data = (hlen |
96fc1784 9406 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9407 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9408
755735eb
EG
9409 pbd->ip_hlen = (skb_transport_header(skb) -
9410 skb_network_header(skb)) / 2;
9411
9412 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9413
755735eb
EG
9414 pbd->total_hlen = cpu_to_le16(hlen);
9415 hlen = hlen*2 - vlan_off;
a2fbb9ea 9416
755735eb
EG
9417 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9418
9419 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9420 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9421 ETH_TX_BD_FLAGS_IP_CSUM;
9422 else
9423 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9424
9425 if (xmit_type & XMIT_CSUM_TCP) {
9426 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9427
9428 } else {
9429 s8 fix = SKB_CS_OFF(skb); /* signed! */
9430
a2fbb9ea 9431 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9432 pbd->cs_offset = fix / 2;
a2fbb9ea 9433
755735eb
EG
9434 DP(NETIF_MSG_TX_QUEUED,
9435 "hlen %d offset %d fix %d csum before fix %x\n",
9436 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9437 SKB_CS(skb));
9438
9439 /* HW bug: fixup the CSUM */
9440 pbd->tcp_pseudo_csum =
9441 bnx2x_csum_fix(skb_transport_header(skb),
9442 SKB_CS(skb), fix);
9443
9444 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9445 pbd->tcp_pseudo_csum);
9446 }
a2fbb9ea
ET
9447 }
9448
9449 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9450 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9451
9452 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9453 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9454 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9455 tx_bd->nbd = cpu_to_le16(nbd);
9456 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9457
9458 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9459 " nbytes %d flags %x vlan %x\n",
9460 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9461 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9462 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9463
755735eb 9464 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9465
9466 DP(NETIF_MSG_TX_QUEUED,
9467 "TSO packet len %d hlen %d total len %d tso size %d\n",
9468 skb->len, hlen, skb_headlen(skb),
9469 skb_shinfo(skb)->gso_size);
9470
9471 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9472
755735eb
EG
9473 if (unlikely(skb_headlen(skb) > hlen))
9474 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9475 bd_prod, ++nbd);
a2fbb9ea
ET
9476
9477 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9478 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9479 pbd->tcp_flags = pbd_tcp_flags(skb);
9480
9481 if (xmit_type & XMIT_GSO_V4) {
9482 pbd->ip_id = swab16(ip_hdr(skb)->id);
9483 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9484 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9485 ip_hdr(skb)->daddr,
9486 0, IPPROTO_TCP, 0));
755735eb
EG
9487
9488 } else
9489 pbd->tcp_pseudo_csum =
9490 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9491 &ipv6_hdr(skb)->daddr,
9492 0, IPPROTO_TCP, 0));
9493
a2fbb9ea
ET
9494 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9495 }
9496
755735eb
EG
9497 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9498 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9499
755735eb
EG
9500 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9501 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9502
755735eb
EG
9503 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9504 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9505
755735eb
EG
9506 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9507 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9508 tx_bd->nbytes = cpu_to_le16(frag->size);
9509 tx_bd->vlan = cpu_to_le16(pkt_prod);
9510 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9511
755735eb
EG
9512 DP(NETIF_MSG_TX_QUEUED,
9513 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9514 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9515 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9516 }
9517
755735eb 9518 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9519 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9520
9521 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9522 tx_bd, tx_bd->bd_flags.as_bitfield);
9523
a2fbb9ea
ET
9524 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9525
755735eb 9526	/* now send a tx doorbell, counting the next-page pointer BD
a2fbb9ea
ET
 9527	 * as well if the packet's BD chain contains or ends with it
 9528	 */
9529 if (TX_BD_POFF(bd_prod) < nbd)
9530 nbd++;
9531
9532 if (pbd)
9533 DP(NETIF_MSG_TX_QUEUED,
9534 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9535 " tcp_flags %x xsum %x seq %u hlen %u\n",
9536 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9537 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9538 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9539
755735eb 9540 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9541
96fc1784
ET
9542 fp->hw_tx_prods->bds_prod =
9543 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9544 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9545 fp->hw_tx_prods->packets_prod =
9546 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9547 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9548
9549 mmiowb();
9550
755735eb 9551 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9552 dev->trans_start = jiffies;
9553
9554 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9555 netif_stop_queue(dev);
bb2a0f7a 9556 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9557 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9558 netif_wake_queue(dev);
9559 }
9560 fp->tx_pkt++;
9561
9562 return NETDEV_TX_OK;
9563}
9564
bb2a0f7a 9565/* called with rtnl_lock */
a2fbb9ea
ET
9566static int bnx2x_open(struct net_device *dev)
9567{
9568 struct bnx2x *bp = netdev_priv(dev);
9569
9570 bnx2x_set_power_state(bp, PCI_D0);
9571
bb2a0f7a 9572 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
9573}
9574
bb2a0f7a 9575/* called with rtnl_lock */
a2fbb9ea
ET
9576static int bnx2x_close(struct net_device *dev)
9577{
a2fbb9ea
ET
9578 struct bnx2x *bp = netdev_priv(dev);
9579
9580 /* Unload the driver, release IRQs */
bb2a0f7a
YG
9581 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9582 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9583 if (!CHIP_REV_IS_SLOW(bp))
9584 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
9585
9586 return 0;
9587}
9588
34f80b04
EG
9589/* called with netif_tx_lock from set_multicast */
9590static void bnx2x_set_rx_mode(struct net_device *dev)
9591{
9592 struct bnx2x *bp = netdev_priv(dev);
9593 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9594 int port = BP_PORT(bp);
9595
9596 if (bp->state != BNX2X_STATE_OPEN) {
9597 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9598 return;
9599 }
9600
9601 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9602
9603 if (dev->flags & IFF_PROMISC)
9604 rx_mode = BNX2X_RX_MODE_PROMISC;
9605
9606 else if ((dev->flags & IFF_ALLMULTI) ||
9607 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9608 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9609
9610 else { /* some multicasts */
9611 if (CHIP_IS_E1(bp)) {
9612 int i, old, offset;
9613 struct dev_mc_list *mclist;
9614 struct mac_configuration_cmd *config =
9615 bnx2x_sp(bp, mcast_config);
9616
9617 for (i = 0, mclist = dev->mc_list;
9618 mclist && (i < dev->mc_count);
9619 i++, mclist = mclist->next) {
9620
9621 config->config_table[i].
9622 cam_entry.msb_mac_addr =
9623 swab16(*(u16 *)&mclist->dmi_addr[0]);
9624 config->config_table[i].
9625 cam_entry.middle_mac_addr =
9626 swab16(*(u16 *)&mclist->dmi_addr[2]);
9627 config->config_table[i].
9628 cam_entry.lsb_mac_addr =
9629 swab16(*(u16 *)&mclist->dmi_addr[4]);
9630 config->config_table[i].cam_entry.flags =
9631 cpu_to_le16(port);
9632 config->config_table[i].
9633 target_table_entry.flags = 0;
9634 config->config_table[i].
9635 target_table_entry.client_id = 0;
9636 config->config_table[i].
9637 target_table_entry.vlan_id = 0;
9638
9639 DP(NETIF_MSG_IFUP,
9640 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9641 config->config_table[i].
9642 cam_entry.msb_mac_addr,
9643 config->config_table[i].
9644 cam_entry.middle_mac_addr,
9645 config->config_table[i].
9646 cam_entry.lsb_mac_addr);
9647 }
9648 old = config->hdr.length_6b;
9649 if (old > i) {
9650 for (; i < old; i++) {
9651 if (CAM_IS_INVALID(config->
9652 config_table[i])) {
9653 i--; /* already invalidated */
9654 break;
9655 }
9656 /* invalidate */
9657 CAM_INVALIDATE(config->
9658 config_table[i]);
9659 }
9660 }
9661
9662 if (CHIP_REV_IS_SLOW(bp))
9663 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9664 else
9665 offset = BNX2X_MAX_MULTICAST*(1 + port);
9666
9667 config->hdr.length_6b = i;
9668 config->hdr.offset = offset;
9669 config->hdr.client_id = BP_CL_ID(bp);
9670 config->hdr.reserved1 = 0;
9671
9672 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9673 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9674 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9675 0);
9676 } else { /* E1H */
9677 /* Accept one or more multicasts */
9678 struct dev_mc_list *mclist;
9679 u32 mc_filter[MC_HASH_SIZE];
9680 u32 crc, bit, regidx;
9681 int i;
9682
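			/* E1H approximate multicast filter: hash each MAC with
			 * CRC32c and use the top 8 bits of the result to set
			 * one of 256 bins spread over the MC_HASH_SIZE
			 * registers */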
9683 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9684
9685 for (i = 0, mclist = dev->mc_list;
9686 mclist && (i < dev->mc_count);
9687 i++, mclist = mclist->next) {
9688
9689 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9690 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9691 mclist->dmi_addr[0], mclist->dmi_addr[1],
9692 mclist->dmi_addr[2], mclist->dmi_addr[3],
9693 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9694
9695 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9696 bit = (crc >> 24) & 0xff;
9697 regidx = bit >> 5;
9698 bit &= 0x1f;
9699 mc_filter[regidx] |= (1 << bit);
9700 }
9701
9702 for (i = 0; i < MC_HASH_SIZE; i++)
9703 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9704 mc_filter[i]);
9705 }
9706 }
9707
9708 bp->rx_mode = rx_mode;
9709 bnx2x_set_storm_rx_mode(bp);
9710}
9711
9712/* called with rtnl_lock */
a2fbb9ea
ET
9713static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9714{
9715 struct sockaddr *addr = p;
9716 struct bnx2x *bp = netdev_priv(dev);
9717
34f80b04 9718 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
9719 return -EINVAL;
9720
9721 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
9722 if (netif_running(dev)) {
9723 if (CHIP_IS_E1(bp))
9724 bnx2x_set_mac_addr_e1(bp);
9725 else
9726 bnx2x_set_mac_addr_e1h(bp);
9727 }
a2fbb9ea
ET
9728
9729 return 0;
9730}
9731
c18487ee 9732/* called with rtnl_lock */
a2fbb9ea
ET
9733static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9734{
9735 struct mii_ioctl_data *data = if_mii(ifr);
9736 struct bnx2x *bp = netdev_priv(dev);
9737 int err;
9738
9739 switch (cmd) {
9740 case SIOCGMIIPHY:
34f80b04 9741 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9742
c14423fe 9743 /* fallthrough */
c18487ee 9744
a2fbb9ea 9745 case SIOCGMIIREG: {
c18487ee 9746 u16 mii_regval;
a2fbb9ea 9747
c18487ee
YR
9748 if (!netif_running(dev))
9749 return -EAGAIN;
a2fbb9ea 9750
34f80b04
EG
9751 mutex_lock(&bp->port.phy_mutex);
9752 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9753 DEFAULT_PHY_DEV_ADDR,
9754 (data->reg_num & 0x1f), &mii_regval);
9755 data->val_out = mii_regval;
34f80b04 9756 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9757 return err;
9758 }
9759
9760 case SIOCSMIIREG:
9761 if (!capable(CAP_NET_ADMIN))
9762 return -EPERM;
9763
c18487ee
YR
9764 if (!netif_running(dev))
9765 return -EAGAIN;
9766
34f80b04
EG
9767 mutex_lock(&bp->port.phy_mutex);
9768 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
c18487ee
YR
9769 DEFAULT_PHY_DEV_ADDR,
9770 (data->reg_num & 0x1f), data->val_in);
34f80b04 9771 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9772 return err;
9773
9774 default:
9775 /* do nothing */
9776 break;
9777 }
9778
9779 return -EOPNOTSUPP;
9780}
9781
34f80b04 9782/* called with rtnl_lock */
a2fbb9ea
ET
9783static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9784{
9785 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9786 int rc = 0;
a2fbb9ea
ET
9787
9788 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9789 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9790 return -EINVAL;
9791
9792 /* This does not race with packet allocation
c14423fe 9793 * because the actual alloc size is
a2fbb9ea
ET
9794 * only updated as part of load
9795 */
9796 dev->mtu = new_mtu;
9797
9798 if (netif_running(dev)) {
34f80b04
EG
9799 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9800 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9801 }
34f80b04
EG
9802
9803 return rc;
a2fbb9ea
ET
9804}
9805
9806static void bnx2x_tx_timeout(struct net_device *dev)
9807{
9808 struct bnx2x *bp = netdev_priv(dev);
9809
9810#ifdef BNX2X_STOP_ON_ERROR
9811 if (!bp->panic)
9812 bnx2x_panic();
9813#endif
9814 /* This allows the netif to be shutdown gracefully before resetting */
9815 schedule_work(&bp->reset_task);
9816}
9817
9818#ifdef BCM_VLAN
34f80b04 9819/* called with rtnl_lock */
a2fbb9ea
ET
9820static void bnx2x_vlan_rx_register(struct net_device *dev,
9821 struct vlan_group *vlgrp)
9822{
9823 struct bnx2x *bp = netdev_priv(dev);
9824
9825 bp->vlgrp = vlgrp;
9826 if (netif_running(dev))
49d66772 9827 bnx2x_set_client_config(bp);
a2fbb9ea 9828}
34f80b04 9829
a2fbb9ea
ET
9830#endif
9831
9832#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9833static void poll_bnx2x(struct net_device *dev)
9834{
9835 struct bnx2x *bp = netdev_priv(dev);
9836
9837 disable_irq(bp->pdev->irq);
9838 bnx2x_interrupt(bp->pdev->irq, dev);
9839 enable_irq(bp->pdev->irq);
9840}
9841#endif
9842
34f80b04
EG
9843static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9844 struct net_device *dev)
a2fbb9ea
ET
9845{
9846 struct bnx2x *bp;
9847 int rc;
9848
9849 SET_NETDEV_DEV(dev, &pdev->dev);
9850 bp = netdev_priv(dev);
9851
34f80b04
EG
9852 bp->dev = dev;
9853 bp->pdev = pdev;
a2fbb9ea 9854 bp->flags = 0;
34f80b04 9855 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9856
9857 rc = pci_enable_device(pdev);
9858 if (rc) {
9859 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9860 goto err_out;
9861 }
9862
9863 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9864 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9865 " aborting\n");
9866 rc = -ENODEV;
9867 goto err_out_disable;
9868 }
9869
9870 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9871 printk(KERN_ERR PFX "Cannot find second PCI device"
9872 " base address, aborting\n");
9873 rc = -ENODEV;
9874 goto err_out_disable;
9875 }
9876
34f80b04
EG
9877 if (atomic_read(&pdev->enable_cnt) == 1) {
9878 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9879 if (rc) {
9880 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9881 " aborting\n");
9882 goto err_out_disable;
9883 }
a2fbb9ea 9884
34f80b04
EG
9885 pci_set_master(pdev);
9886 pci_save_state(pdev);
9887 }
a2fbb9ea
ET
9888
9889 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9890 if (bp->pm_cap == 0) {
9891 printk(KERN_ERR PFX "Cannot find power management"
9892 " capability, aborting\n");
9893 rc = -EIO;
9894 goto err_out_release;
9895 }
9896
9897 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9898 if (bp->pcie_cap == 0) {
9899 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9900 " aborting\n");
9901 rc = -EIO;
9902 goto err_out_release;
9903 }
9904
9905 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9906 bp->flags |= USING_DAC_FLAG;
9907 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9908 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9909 " failed, aborting\n");
9910 rc = -EIO;
9911 goto err_out_release;
9912 }
9913
9914 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9915 printk(KERN_ERR PFX "System does not support DMA,"
9916 " aborting\n");
9917 rc = -EIO;
9918 goto err_out_release;
9919 }
9920
34f80b04
EG
9921 dev->mem_start = pci_resource_start(pdev, 0);
9922 dev->base_addr = dev->mem_start;
9923 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9924
9925 dev->irq = pdev->irq;
9926
9927 bp->regview = ioremap_nocache(dev->base_addr,
9928 pci_resource_len(pdev, 0));
9929 if (!bp->regview) {
9930 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9931 rc = -ENOMEM;
9932 goto err_out_release;
9933 }
9934
34f80b04
EG
9935 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9936 min_t(u64, BNX2X_DB_SIZE,
9937 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
9938 if (!bp->doorbells) {
9939 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9940 rc = -ENOMEM;
9941 goto err_out_unmap;
9942 }
9943
9944 bnx2x_set_power_state(bp, PCI_D0);
9945
34f80b04
EG
9946 /* clean indirect addresses */
9947 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9948 PCICFG_VENDOR_ID_OFFSET);
9949 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9950 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9951 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9952 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9953
34f80b04
EG
9954 dev->hard_start_xmit = bnx2x_start_xmit;
9955 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9956
34f80b04
EG
9957 dev->ethtool_ops = &bnx2x_ethtool_ops;
9958 dev->open = bnx2x_open;
9959 dev->stop = bnx2x_close;
9960 dev->set_multicast_list = bnx2x_set_rx_mode;
9961 dev->set_mac_address = bnx2x_change_mac_addr;
9962 dev->do_ioctl = bnx2x_ioctl;
9963 dev->change_mtu = bnx2x_change_mtu;
9964 dev->tx_timeout = bnx2x_tx_timeout;
9965#ifdef BCM_VLAN
9966 dev->vlan_rx_register = bnx2x_vlan_rx_register;
9967#endif
9968#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9969 dev->poll_controller = poll_bnx2x;
9970#endif
9971 dev->features |= NETIF_F_SG;
9972 dev->features |= NETIF_F_HW_CSUM;
9973 if (bp->flags & USING_DAC_FLAG)
9974 dev->features |= NETIF_F_HIGHDMA;
9975#ifdef BCM_VLAN
9976 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9977#endif
9978 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 9979 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
9980
9981 return 0;
9982
9983err_out_unmap:
9984 if (bp->regview) {
9985 iounmap(bp->regview);
9986 bp->regview = NULL;
9987 }
a2fbb9ea
ET
9988 if (bp->doorbells) {
9989 iounmap(bp->doorbells);
9990 bp->doorbells = NULL;
9991 }
9992
9993err_out_release:
34f80b04
EG
9994 if (atomic_read(&pdev->enable_cnt) == 1)
9995 pci_release_regions(pdev);
a2fbb9ea
ET
9996
9997err_out_disable:
9998 pci_disable_device(pdev);
9999 pci_set_drvdata(pdev, NULL);
10000
10001err_out:
10002 return rc;
10003}
10004
25047950
ET
10005static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10006{
10007 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10008
10009 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10010 return val;
10011}
10012
10013/* return value of 1=2.5GHz 2=5GHz */
10014static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10015{
10016 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10017
10018 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10019 return val;
10020}
10021
a2fbb9ea
ET
10022static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10023 const struct pci_device_id *ent)
10024{
10025 static int version_printed;
10026 struct net_device *dev = NULL;
10027 struct bnx2x *bp;
25047950 10028 int rc;
25047950 10029 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
10030
10031 if (version_printed++ == 0)
10032 printk(KERN_INFO "%s", version);
10033
10034 /* dev zeroed in init_etherdev */
10035 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10036 if (!dev) {
10037 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10038 return -ENOMEM;
34f80b04 10039 }
a2fbb9ea
ET
10040
10041 netif_carrier_off(dev);
10042
10043 bp = netdev_priv(dev);
10044 bp->msglevel = debug;
10045
34f80b04 10046 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10047 if (rc < 0) {
10048 free_netdev(dev);
10049 return rc;
10050 }
10051
a2fbb9ea
ET
10052 rc = register_netdev(dev);
10053 if (rc) {
c14423fe 10054 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10055 goto init_one_exit;
a2fbb9ea
ET
10056 }
10057
10058 pci_set_drvdata(pdev, dev);
10059
34f80b04
EG
10060 rc = bnx2x_init_bp(bp);
10061 if (rc) {
10062 unregister_netdev(dev);
10063 goto init_one_exit;
10064 }
10065
10066 bp->common.name = board_info[ent->driver_data].name;
25047950 10067 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10068 " IRQ %d, ", dev->name, bp->common.name,
10069 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10070 bnx2x_get_pcie_width(bp),
10071 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10072 dev->base_addr, bp->pdev->irq);
10073 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10074 return 0;
34f80b04
EG
10075
10076init_one_exit:
10077 if (bp->regview)
10078 iounmap(bp->regview);
10079
10080 if (bp->doorbells)
10081 iounmap(bp->doorbells);
10082
10083 free_netdev(dev);
10084
10085 if (atomic_read(&pdev->enable_cnt) == 1)
10086 pci_release_regions(pdev);
10087
10088 pci_disable_device(pdev);
10089 pci_set_drvdata(pdev, NULL);
10090
10091 return rc;
a2fbb9ea
ET
10092}
10093
10094static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10095{
10096 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10097 struct bnx2x *bp;
10098
10099 if (!dev) {
228241eb
ET
10100 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10101 return;
10102 }
228241eb 10103 bp = netdev_priv(dev);
a2fbb9ea 10104
a2fbb9ea
ET
10105 unregister_netdev(dev);
10106
10107 if (bp->regview)
10108 iounmap(bp->regview);
10109
10110 if (bp->doorbells)
10111 iounmap(bp->doorbells);
10112
10113 free_netdev(dev);
34f80b04
EG
10114
10115 if (atomic_read(&pdev->enable_cnt) == 1)
10116 pci_release_regions(pdev);
10117
a2fbb9ea
ET
10118 pci_disable_device(pdev);
10119 pci_set_drvdata(pdev, NULL);
10120}
10121
10122static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10123{
10124 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10125 struct bnx2x *bp;
10126
34f80b04
EG
10127 if (!dev) {
10128 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10129 return -ENODEV;
10130 }
10131 bp = netdev_priv(dev);
a2fbb9ea 10132
34f80b04 10133 rtnl_lock();
a2fbb9ea 10134
34f80b04 10135 pci_save_state(pdev);
228241eb 10136
34f80b04
EG
10137 if (!netif_running(dev)) {
10138 rtnl_unlock();
10139 return 0;
10140 }
a2fbb9ea
ET
10141
10142 netif_device_detach(dev);
a2fbb9ea 10143
34f80b04
EG
10144 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10145
a2fbb9ea 10146 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10147
34f80b04
EG
10148 rtnl_unlock();
10149
a2fbb9ea
ET
10150 return 0;
10151}
10152
10153static int bnx2x_resume(struct pci_dev *pdev)
10154{
10155 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10156 struct bnx2x *bp;
a2fbb9ea
ET
10157 int rc;
10158
228241eb
ET
10159 if (!dev) {
10160 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10161 return -ENODEV;
10162 }
228241eb 10163 bp = netdev_priv(dev);
a2fbb9ea 10164
34f80b04
EG
10165 rtnl_lock();
10166
228241eb 10167 pci_restore_state(pdev);
34f80b04
EG
10168
10169 if (!netif_running(dev)) {
10170 rtnl_unlock();
10171 return 0;
10172 }
10173
a2fbb9ea
ET
10174 bnx2x_set_power_state(bp, PCI_D0);
10175 netif_device_attach(dev);
10176
34f80b04 10177 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10178
34f80b04
EG
10179 rtnl_unlock();
10180
10181 return rc;
a2fbb9ea
ET
10182}
10183
493adb1f
WX
10184/**
10185 * bnx2x_io_error_detected - called when PCI error is detected
10186 * @pdev: Pointer to PCI device
10187 * @state: The current pci connection state
10188 *
10189 * This function is called after a PCI bus error affecting
10190 * this device has been detected.
10191 */
10192static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10193 pci_channel_state_t state)
10194{
10195 struct net_device *dev = pci_get_drvdata(pdev);
10196 struct bnx2x *bp = netdev_priv(dev);
10197
10198 rtnl_lock();
10199
10200 netif_device_detach(dev);
10201
10202 if (netif_running(dev))
10203 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10204
10205 pci_disable_device(pdev);
10206
10207 rtnl_unlock();
10208
10209 /* Request a slot reset */
10210 return PCI_ERS_RESULT_NEED_RESET;
10211}
10212
10213/**
10214 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10215 * @pdev: Pointer to PCI device
10216 *
10217 * Restart the card from scratch, as if from a cold-boot.
10218 */
10219static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10220{
10221 struct net_device *dev = pci_get_drvdata(pdev);
10222 struct bnx2x *bp = netdev_priv(dev);
10223
10224 rtnl_lock();
10225
10226 if (pci_enable_device(pdev)) {
10227 dev_err(&pdev->dev,
10228 "Cannot re-enable PCI device after reset\n");
10229 rtnl_unlock();
10230 return PCI_ERS_RESULT_DISCONNECT;
10231 }
10232
10233 pci_set_master(pdev);
10234 pci_restore_state(pdev);
10235
10236 if (netif_running(dev))
10237 bnx2x_set_power_state(bp, PCI_D0);
10238
10239 rtnl_unlock();
10240
10241 return PCI_ERS_RESULT_RECOVERED;
10242}
10243
10244/**
10245 * bnx2x_io_resume - called when traffic can start flowing again
10246 * @pdev: Pointer to PCI device
10247 *
10248 * This callback is called when the error recovery driver tells us that
10249 * its OK to resume normal operation.
10250 */
10251static void bnx2x_io_resume(struct pci_dev *pdev)
10252{
10253 struct net_device *dev = pci_get_drvdata(pdev);
10254 struct bnx2x *bp = netdev_priv(dev);
10255
10256 rtnl_lock();
10257
10258 if (netif_running(dev))
10259 bnx2x_nic_load(bp, LOAD_OPEN);
10260
10261 netif_device_attach(dev);
10262
10263 rtnl_unlock();
10264}
10265
10266static struct pci_error_handlers bnx2x_err_handler = {
10267 .error_detected = bnx2x_io_error_detected,
10268 .slot_reset = bnx2x_io_slot_reset,
10269 .resume = bnx2x_io_resume,
10270};
10271
a2fbb9ea 10272static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10273 .name = DRV_MODULE_NAME,
10274 .id_table = bnx2x_pci_tbl,
10275 .probe = bnx2x_init_one,
10276 .remove = __devexit_p(bnx2x_remove_one),
10277 .suspend = bnx2x_suspend,
10278 .resume = bnx2x_resume,
10279 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10280};
10281
10282static int __init bnx2x_init(void)
10283{
10284 return pci_register_driver(&bnx2x_pci_driver);
10285}
10286
10287static void __exit bnx2x_cleanup(void)
10288{
10289 pci_unregister_driver(&bnx2x_pci_driver);
10290}
10291
10292module_init(bnx2x_init);
10293module_exit(bnx2x_cleanup);
10294