/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
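
/* Note: both helpers above reach chip registers indirectly through the GRC
 * window in PCI config space: the target address goes into
 * PCICFG_GRC_ADDRESS, the payload moves through PCICFG_GRC_DATA, and the
 * window is parked back at PCICFG_VENDOR_ID_OFFSET afterwards, presumably
 * so that a later unrelated config access cannot hit a live register.
 */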

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
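
/* Note: bnx2x_post_dmae() copies the command dword-by-dword into the DMAE
 * command memory slot selected by idx and then rings the matching
 * doorbell; the sixteen DMAE_REG_GO_C* registers in dmae_reg_go_c[]
 * correspond one-to-one to the sixteen command slots.
 */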

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
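
/* Note: DMAE completion is detected by polling the wb_comp word in
 * slowpath memory, which the engine rewrites to DMAE_COMP_VAL when the
 * copy finishes; with cnt = 200 and a 5us (100ms on emulation/FPGA) poll
 * interval, the wait is bounded rather than infinite.  Before the engine
 * is initialized (!bp->dmae_ready) the copy falls back to indirect
 * register writes.
 */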

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
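
/* Note: the wb helpers access 64-bit registers as two-dword DMAE
 * transfers; per the comment above they run only on the slowpath, where
 * the cost of a DMAE round trip is acceptable.
 */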

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
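
/* Note: the device has four STORM microcode processors (XSTORM, TSTORM,
 * CSTORM, USTORM), each keeping an assert list in its internal memory; an
 * entry whose first dword is not COMMON_ASM_INVALID_ASSERT_OPCODE is a
 * real assert, the scan stops at the first free slot, and the return
 * value is the total number of asserts found.
 */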

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
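
/* Note: the two-loop layout above suggests the MCP firmware keeps a
 * circular text log in its scratchpad: "mark" is the current write
 * position, so the dump prints from mark to the end of the buffer first,
 * then wraps from the start back up to mark, 32 bytes (8 dwords) at a
 * time with a NUL terminator appended.
 */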

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
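
/* Note: quiescing happens in three steps - bump intr_sem so any ISR that
 * still fires bails out early, optionally mask interrupts at the HC, then
 * synchronize_irq() every vector in use and cancel_work_sync() the
 * slowpath task so nothing is left running when the caller proceeds.
 */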

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
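
/* Note: the return value is a small bitmask - bit 0 is set when the
 * CSTORM status block index moved and bit 1 when the USTORM index moved -
 * letting the caller tell which half of the status block has new events.
 */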

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
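
/* Note: nbd counts the buffer descriptors behind one packet.  Only the
 * first BD (pci_unmap_single) and the fragment BDs (pci_unmap_page) carry
 * DMA mappings; the optional parse BD used for checksum/LSO offload and
 * the TSO split-header BD have none, so they are merely skipped over.
 */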

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
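
/* Note: SUB_S16() takes a signed 16-bit difference, so prod - cons stays
 * correct even after the 16-bit indices wrap around; adding NUM_TX_RINGS
 * accounts for the "next page" BD at the end of each ring page, which can
 * never hold packet data.
 */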

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
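
/* Note: slowpath commands ("ramrods") complete back through the RX
 * completion queue; ORing the command into the current state lets a
 * single switch verify that the reply matches the expected transition
 * and advance the state machine in one step.
 */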

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
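
/* Note: fp->sge_mask is a bitmap over the SGE ring - a set bit means the
 * entry still belongs to the driver, a cleared bit that the firmware has
 * consumed it.  The producer only advances over whole 64-bit mask
 * elements that became fully consumed, which is why delta grows in
 * RX_SGE_MASK_ELEM_SZ steps.
 */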

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
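
/* Note: TPA keeps one skb per aggregation bin in tpa_pool; _start parks
 * the partially filled skb in the bin, and _stop rewrites the IP header
 * checksum (the aggregated super-packet no longer matches the on-wire
 * headers), attaches the SGE pages as page fragments and hands the
 * result to the stack.
 */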

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}
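
/* Note: all three RX producers (BD, CQE and SGE) are published to the
 * TSTORM internal memory dword by dword from one structure, so the
 * firmware observes a consistent producer triplet for this client.
 */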

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
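
/* Note: a single pass over the completion queue handles four CQE kinds -
 * slowpath (ramrod) completions, TPA start markers, TPA end markers and
 * plain RX packets - while the budget argument caps how many packets one
 * NAPI poll may consume.
 */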

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
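
/* Note: in INT#A mode the ack read returns a status bitmap - bit 0 is
 * the slowpath interrupt (deferred to sp_task) and each fastpath status
 * block owns the bit at (0x2 << sb_id); anything left over is reported
 * as an unknown interrupt.
 */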

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1. */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
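
/*
 * Worked example for the weight-sum above (illustrative numbers only,
 * assuming DEF_MIN_RATE resolves to 100): with per-vn min-BW fields that
 * decode to {0, 2500, 0, 7500}, the two zero entries are bumped to
 * DEF_MIN_RATE, so wsum = 100 + 2500 + 100 + 7500 = 10200. Only if all
 * visible vns decode to 0 does the function return 0, which disables
 * fairness in bnx2x_init_port_minmax() below.
 */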

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes - "
			   "fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will
		   occur. The 1.25 coefficient makes the threshold a little
		   bigger than the real time, to compensate for timer
		   inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode - minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vns share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol's share of the vn
				   rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
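
/*
 * Descriptive note on the helper above: bnx2x_sp_post() returns 0 once the
 * entry is queued and the new producer index is written to XSTORM internal
 * memory; it fails with -EBUSY when spq_left hits zero and, in
 * BNX2X_STOP_ON_ERROR builds, with -EIO after a panic. Completions never
 * arrive here - as the comment above says, they come back on the fastpath
 * ring.
 */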

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
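
/*
 * Descriptive note: the return value above is a bitmask of which default
 * status block indices changed - bit 0 attention, bit 1 CSTORM, bit 2
 * USTORM, bit 3 XSTORM, bit 4 TSTORM. bnx2x_sp_task() below tests bit 0
 * for HW attentions and bit 1 for CStorm events.
 */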

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
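
/*
 * Worked example for the 64-bit helpers above (illustrative values):
 * ADD_64 with s = 0x1:0xfffffffe and a = 0x0:0x3 wraps the low word to
 * 0x1; the (s_lo < a_lo) test detects the wrap and carries 1, giving
 * 0x2:0x1. DIFF_64 with m = 0x2:0x0 and s = 0x1:0x1 takes the underflow
 * branch: d_hi = 2 - 1 - 1 = 0 and d_lo = 0 + (UINT_MAX - 1) + 1 =
 * 0xffffffff, i.e. the expected 64-bit difference.
 */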

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
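
/*
 * Descriptive note: on 64-bit builds bnx2x_hilo() folds the {hi,lo} pair
 * into one long via HILO_U64; on 32-bit builds the high word is dropped
 * and only the low 32 bits are reported, so large counters can appear
 * truncated there.
 */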

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
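
/*
 * Descriptive note: bnx2x_stats_comp() polls the DMAE completion word for
 * up to ~10 ms and unconditionally returns 1, even on timeout; callers
 * such as bnx2x_stats_pmf_update() below use it purely as a
 * synchronization point and ignore the value.
 */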

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3439static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3440{
3441 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3442 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3443 struct regpair diff;
3444
3445 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3446 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3447 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3448 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3449 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3450 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3451 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3452 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3453 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3454 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3455 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3456 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3457 UPDATE_STAT64(tx_stat_gt127,
3458 tx_stat_etherstatspkts65octetsto127octets);
3459 UPDATE_STAT64(tx_stat_gt255,
3460 tx_stat_etherstatspkts128octetsto255octets);
3461 UPDATE_STAT64(tx_stat_gt511,
3462 tx_stat_etherstatspkts256octetsto511octets);
3463 UPDATE_STAT64(tx_stat_gt1023,
3464 tx_stat_etherstatspkts512octetsto1023octets);
3465 UPDATE_STAT64(tx_stat_gt1518,
3466 tx_stat_etherstatspkts1024octetsto1522octets);
3467 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3468 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3469 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3470 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3471 UPDATE_STAT64(tx_stat_gterr,
3472 tx_stat_dot3statsinternalmactransmiterrors);
3473 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3474}
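/*
 * UPDATE_STAT64() (a macro from bnx2x.h) is what the local
 * 'struct regpair diff' above is for: roughly, it computes the
 * 64-bit difference between the new BMAC snapshot and the previously
 * stored one into 'diff', saves the new snapshot, and accumulates the
 * difference into the matching host_port_stats field, so the host
 * counters keep growing monotonically across MAC counter wraps.
 */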
3475
3476static void bnx2x_emac_stats_update(struct bnx2x *bp)
3477{
3478 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3479 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3480
3481 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3482 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3483 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3484 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3485 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3486 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3487 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3488 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3489 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3490 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3491 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3492 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3493 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3494 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3495 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3496 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3497 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3498 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3499 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3500 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3501 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3502 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3503 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3504 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3505 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3507 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3508 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3510 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3511 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3512}
3513
3514static int bnx2x_hw_stats_update(struct bnx2x *bp)
3515{
3516 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3517 struct nig_stats *old = &(bp->port.old_nig_stats);
3518 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3519 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3520 struct regpair diff;
3521
3522 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3523 bnx2x_bmac_stats_update(bp);
3524
3525 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3526 bnx2x_emac_stats_update(bp);
3527
3528 else { /* unreached */
3529 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3530 return -1;
3531 }
a2fbb9ea 3532
bb2a0f7a
YG
3533 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3534 new->brb_discard - old->brb_discard);
66e855f3
YG
3535 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3536 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3537
bb2a0f7a
YG
3538 UPDATE_STAT64_NIG(egress_mac_pkt0,
3539 etherstatspkts1024octetsto1522octets);
3540 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3541
bb2a0f7a 3542 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3543
bb2a0f7a
YG
3544 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3545 sizeof(struct mac_stx));
3546 estats->brb_drop_hi = pstats->brb_drop_hi;
3547 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3548
bb2a0f7a 3549 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3550
bb2a0f7a 3551 return 0;
a2fbb9ea
ET
3552}
3553
bb2a0f7a 3554static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3555{
3556 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3557 int cl_id = BP_CL_ID(bp);
3558 struct tstorm_per_port_stats *tport =
3559 &stats->tstorm_common.port_statistics;
a2fbb9ea 3560 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3561 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3562 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3563 struct xstorm_per_client_stats *xclient =
3564 &stats->xstorm_common.client_statistics[cl_id];
3565 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3566 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3567 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3568 u32 diff;
3569
bb2a0f7a
YG
3570 /* are storm stats valid? */
3571 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3572 bp->stats_counter) {
3573 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3574 " tstorm counter (%d) != stats_counter (%d)\n",
3575 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3576 return -1;
3577 }
bb2a0f7a
YG
3578 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3579 bp->stats_counter) {
3580 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3581 " xstorm counter (%d) != stats_counter (%d)\n",
3582 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3583 return -2;
3584 }
a2fbb9ea 3585
bb2a0f7a
YG
3586 fstats->total_bytes_received_hi =
3587 fstats->valid_bytes_received_hi =
a2fbb9ea 3588 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3589 fstats->total_bytes_received_lo =
3590 fstats->valid_bytes_received_lo =
a2fbb9ea 3591 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3592
3593 estats->error_bytes_received_hi =
3594 le32_to_cpu(tclient->rcv_error_bytes.hi);
3595 estats->error_bytes_received_lo =
3596 le32_to_cpu(tclient->rcv_error_bytes.lo);
3597 ADD_64(estats->error_bytes_received_hi,
3598 estats->rx_stat_ifhcinbadoctets_hi,
3599 estats->error_bytes_received_lo,
3600 estats->rx_stat_ifhcinbadoctets_lo);
3601
3602 ADD_64(fstats->total_bytes_received_hi,
3603 estats->error_bytes_received_hi,
3604 fstats->total_bytes_received_lo,
3605 estats->error_bytes_received_lo);
3606
3607 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3608 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3609 total_multicast_packets_received);
a2fbb9ea 3610 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3611 total_broadcast_packets_received);
3612
3613 fstats->total_bytes_transmitted_hi =
3614 le32_to_cpu(xclient->total_sent_bytes.hi);
3615 fstats->total_bytes_transmitted_lo =
3616 le32_to_cpu(xclient->total_sent_bytes.lo);
3617
3618 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3619 total_unicast_packets_transmitted);
3620 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3621 total_multicast_packets_transmitted);
3622 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3623 total_broadcast_packets_transmitted);
3624
3625 memcpy(estats, &(fstats->total_bytes_received_hi),
3626 sizeof(struct host_func_stats) - 2*sizeof(u32));
3627
3628 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3629 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3630 estats->brb_truncate_discard =
3631 le32_to_cpu(tport->brb_truncate_discard);
3632 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3633
3634 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3635 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3636 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3637 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3638 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3639 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3640 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3641 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3642 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3643 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3644 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3645 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3646 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3647
bb2a0f7a
YG
3648 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3649 old_tclient->packets_too_big_discard =
a2fbb9ea 3650 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3651 estats->no_buff_discard =
3652 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3653 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3654
3655 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3656 old_xclient->unicast_bytes_sent.hi =
3657 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3658 old_xclient->unicast_bytes_sent.lo =
3659 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3660 old_xclient->multicast_bytes_sent.hi =
3661 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3662 old_xclient->multicast_bytes_sent.lo =
3663 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3664 old_xclient->broadcast_bytes_sent.hi =
3665 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3666 old_xclient->broadcast_bytes_sent.lo =
3667 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3668
3669 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3670
3671 return 0;
3672}
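/*
 * The stats_counter checks at the top of bnx2x_storm_stats_update()
 * implement a simple handshake: bp->stats_counter is advanced when a
 * query is posted to the storms (in bnx2x_storm_stats_post(), not
 * shown here), and each storm stamps its snapshot with the counter
 * of the query that produced it.  A snapshot is accepted only when
 * (storm counter + 1) == bp->stats_counter, i.e. when it answers the
 * most recently posted query; otherwise the stale data is skipped
 * and -1/-2 is returned.
 */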
3673
bb2a0f7a 3674static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3675{
bb2a0f7a
YG
3676 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3677 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3678 struct net_device_stats *nstats = &bp->dev->stats;
3679
3680 nstats->rx_packets =
3681 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3682 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3683 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3684
3685 nstats->tx_packets =
3686 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3687 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3688 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3689
bb2a0f7a 3690 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3691
0e39e645 3692 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3693
bb2a0f7a
YG
3694 nstats->rx_dropped = old_tclient->checksum_discard +
3695 estats->mac_discard;
a2fbb9ea
ET
3696 nstats->tx_dropped = 0;
3697
3698 nstats->multicast =
3699 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3700
bb2a0f7a
YG
3701 nstats->collisions =
3702 estats->tx_stat_dot3statssinglecollisionframes_lo +
3703 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3704 estats->tx_stat_dot3statslatecollisions_lo +
3705 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3706
bb2a0f7a
YG
3707 estats->jabber_packets_received =
3708 old_tclient->packets_too_big_discard +
3709 estats->rx_stat_dot3statsframestoolong_lo;
3710
3711 nstats->rx_length_errors =
3712 estats->rx_stat_etherstatsundersizepkts_lo +
3713 estats->jabber_packets_received;
66e855f3 3714 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3715 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3716 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3717 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3718 nstats->rx_missed_errors = estats->xxoverflow_discard;
3719
3720 nstats->rx_errors = nstats->rx_length_errors +
3721 nstats->rx_over_errors +
3722 nstats->rx_crc_errors +
3723 nstats->rx_frame_errors +
0e39e645
ET
3724 nstats->rx_fifo_errors +
3725 nstats->rx_missed_errors;
a2fbb9ea 3726
bb2a0f7a
YG
3727 nstats->tx_aborted_errors =
3728 estats->tx_stat_dot3statslatecollisions_lo +
3729 estats->tx_stat_dot3statsexcessivecollisions_lo;
3730 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3731 nstats->tx_fifo_errors = 0;
3732 nstats->tx_heartbeat_errors = 0;
3733 nstats->tx_window_errors = 0;
3734
3735 nstats->tx_errors = nstats->tx_aborted_errors +
3736 nstats->tx_carrier_errors;
a2fbb9ea
ET
3737}
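/*
 * bnx2x_hilo() folds one of the {_hi, _lo} counter pairs above into
 * the unsigned long that struct net_device_stats expects.  Roughly
 * (a sketch of the helper defined earlier in this file):
 *
 *	static inline long bnx2x_hilo(u32 *hiref)
 *	{
 *		u32 lo = *(hiref + 1);
 *	#if (BITS_PER_LONG == 64)
 *		return HILO_U64(*hiref, lo);
 *	#else
 *		return lo;
 *	#endif
 *	}
 *
 * so on 32-bit hosts only the low 32 bits of each counter survive.
 */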
3738
bb2a0f7a 3739static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3740{
bb2a0f7a
YG
3741 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3742 int update = 0;
a2fbb9ea 3743
bb2a0f7a
YG
3744 if (*stats_comp != DMAE_COMP_VAL)
3745 return;
3746
3747 if (bp->port.pmf)
3748 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3749
bb2a0f7a 3750 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3751
bb2a0f7a
YG
3752 if (update)
3753 bnx2x_net_stats_update(bp);
a2fbb9ea 3754
bb2a0f7a
YG
3755 else {
3756 if (bp->stats_pending) {
3757 bp->stats_pending++;
3758 if (bp->stats_pending == 3) {
3759 BNX2X_ERR("stats not updated for 3 times\n");
3760 bnx2x_panic();
3761 return;
3762 }
3763 }
a2fbb9ea
ET
3764 }
3765
3766 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3767 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3768 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3769 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3770 int i;
a2fbb9ea
ET
3771
3772 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3773 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3774 " tx pkt (%lx)\n",
3775 bnx2x_tx_avail(bp->fp),
7a9b2557 3776 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3777 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3778 " rx pkt (%lx)\n",
7a9b2557
VZ
3779 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3780 bp->fp->rx_comp_cons),
3781 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3782 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3783 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3784 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3785 printk(KERN_DEBUG "tstats: checksum_discard %u "
3786 "packets_too_big_discard %u no_buff_discard %u "
3787 "mac_discard %u mac_filter_discard %u "
3788 "xxovrflow_discard %u brb_truncate_discard %u "
3789 "ttl0_discard %u\n",
bb2a0f7a
YG
3790 old_tclient->checksum_discard,
3791 old_tclient->packets_too_big_discard,
3792 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3793 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3794 estats->brb_truncate_discard,
3795 old_tclient->ttl0_discard);
a2fbb9ea
ET
3796
3797 for_each_queue(bp, i) {
3798 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3799 bnx2x_fp(bp, i, tx_pkt),
3800 bnx2x_fp(bp, i, rx_pkt),
3801 bnx2x_fp(bp, i, rx_calls));
3802 }
3803 }
3804
bb2a0f7a
YG
3805 bnx2x_hw_stats_post(bp);
3806 bnx2x_storm_stats_post(bp);
3807}
a2fbb9ea 3808
bb2a0f7a
YG
3809static void bnx2x_port_stats_stop(struct bnx2x *bp)
3810{
3811 struct dmae_command *dmae;
3812 u32 opcode;
3813 int loader_idx = PMF_DMAE_C(bp);
3814 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3815
bb2a0f7a 3816 bp->executer_idx = 0;
a2fbb9ea 3817
bb2a0f7a
YG
3818 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3819 DMAE_CMD_C_ENABLE |
3820 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3821#ifdef __BIG_ENDIAN
bb2a0f7a 3822 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3823#else
bb2a0f7a 3824 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3825#endif
bb2a0f7a
YG
3826 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3827 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3828
3829 if (bp->port.port_stx) {
3830
3831 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3832 if (bp->func_stx)
3833 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3834 else
3835 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3836 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3837 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3838 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3839 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3840 dmae->len = sizeof(struct host_port_stats) >> 2;
3841 if (bp->func_stx) {
3842 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3843 dmae->comp_addr_hi = 0;
3844 dmae->comp_val = 1;
3845 } else {
3846 dmae->comp_addr_lo =
3847 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3848 dmae->comp_addr_hi =
3849 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3850 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3851
bb2a0f7a
YG
3852 *stats_comp = 0;
3853 }
a2fbb9ea
ET
3854 }
3855
bb2a0f7a
YG
3856 if (bp->func_stx) {
3857
3858 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3859 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3860 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3861 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3862 dmae->dst_addr_lo = bp->func_stx >> 2;
3863 dmae->dst_addr_hi = 0;
3864 dmae->len = sizeof(struct host_func_stats) >> 2;
3865 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3866 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3867 dmae->comp_val = DMAE_COMP_VAL;
3868
3869 *stats_comp = 0;
a2fbb9ea 3870 }
bb2a0f7a
YG
3871}
3872
3873static void bnx2x_stats_stop(struct bnx2x *bp)
3874{
3875 int update = 0;
3876
3877 bnx2x_stats_comp(bp);
3878
3879 if (bp->port.pmf)
3880 update = (bnx2x_hw_stats_update(bp) == 0);
3881
3882 update |= (bnx2x_storm_stats_update(bp) == 0);
3883
3884 if (update) {
3885 bnx2x_net_stats_update(bp);
a2fbb9ea 3886
bb2a0f7a
YG
3887 if (bp->port.pmf)
3888 bnx2x_port_stats_stop(bp);
3889
3890 bnx2x_hw_stats_post(bp);
3891 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3892 }
3893}
3894
bb2a0f7a
YG
3895static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3896{
3897}
3898
3899static const struct {
3900 void (*action)(struct bnx2x *bp);
3901 enum bnx2x_stats_state next_state;
3902} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3903/* state event */
3904{
3905/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3906/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3907/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3908/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3909},
3910{
3911/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3912/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3913/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3914/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3915}
3916};
3917
3918static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3919{
3920 enum bnx2x_stats_state state = bp->stats_state;
3921
3922 bnx2x_stats_stm[state][event].action(bp);
3923 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3924
3925 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3926 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3927 state, event, bp->stats_state);
3928}
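/*
 * Usage of the state machine above: the link handling code posts
 * STATS_EVENT_PMF/LINK_UP, the periodic timer posts
 * STATS_EVENT_UPDATE (see bnx2x_timer() below), and the unload path
 * is expected to post STATS_EVENT_STOP.  While the interface is up,
 * each timer tick therefore resolves to
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *		-> bnx2x_stats_update(bp)	(state stays ENABLED)
 */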
3929
a2fbb9ea
ET
3930static void bnx2x_timer(unsigned long data)
3931{
3932 struct bnx2x *bp = (struct bnx2x *) data;
3933
3934 if (!netif_running(bp->dev))
3935 return;
3936
3937 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3938 goto timer_restart;
a2fbb9ea
ET
3939
3940 if (poll) {
3941 struct bnx2x_fastpath *fp = &bp->fp[0];
3942 int rc;
3943
3944 bnx2x_tx_int(fp, 1000);
3945 rc = bnx2x_rx_int(fp, 1000);
3946 }
3947
34f80b04
EG
3948 if (!BP_NOMCP(bp)) {
3949 int func = BP_FUNC(bp);
a2fbb9ea
ET
3950 u32 drv_pulse;
3951 u32 mcp_pulse;
3952
3953 ++bp->fw_drv_pulse_wr_seq;
3954 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3955 /* TBD - add SYSTEM_TIME */
3956 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3957 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3958
34f80b04 3959 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3960 MCP_PULSE_SEQ_MASK);
3961 /* The delta between driver pulse and mcp response
3962 * should be 1 (before mcp response) or 0 (after mcp response)
3963 */
3964 if ((drv_pulse != mcp_pulse) &&
3965 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3966 /* someone lost a heartbeat... */
3967 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3968 drv_pulse, mcp_pulse);
3969 }
3970 }
3971
bb2a0f7a
YG
3972 if ((bp->state == BNX2X_STATE_OPEN) ||
3973 (bp->state == BNX2X_STATE_DISABLED))
3974 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3975
f1410647 3976timer_restart:
a2fbb9ea
ET
3977 mod_timer(&bp->timer, jiffies + bp->current_interval);
3978}
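/*
 * Heartbeat example for the pulse check above: if the driver has just
 * written drv_pulse = 0x0042, a healthy MCP reads back either 0x0042
 * (already acknowledged, delta 0) or 0x0041 (not yet acknowledged,
 * delta 1).  Any other value means the driver and the MCP have lost
 * step and the mismatch is logged.
 */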
3979
3980/* end of Statistics */
3981
3982/* nic init */
3983
3984/*
3985 * nic init service functions
3986 */
3987
34f80b04 3988static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 3989{
34f80b04
EG
3990 int port = BP_PORT(bp);
3991
3992 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3993 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 3994 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
3995 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3996 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 3997 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
3998}
3999
5c862848
EG
4000static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4001 dma_addr_t mapping, int sb_id)
34f80b04
EG
4002{
4003 int port = BP_PORT(bp);
bb2a0f7a 4004 int func = BP_FUNC(bp);
a2fbb9ea 4005 int index;
34f80b04 4006 u64 section;
a2fbb9ea
ET
4007
4008 /* USTORM */
4009 section = ((u64)mapping) + offsetof(struct host_status_block,
4010 u_status_block);
34f80b04 4011 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4012
4013 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4014 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4015 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4016 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4017 U64_HI(section));
bb2a0f7a
YG
4018 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4019 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4020
4021 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4022 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4023 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4024
4025 /* CSTORM */
4026 section = ((u64)mapping) + offsetof(struct host_status_block,
4027 c_status_block);
34f80b04 4028 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4029
4030 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4031 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4032 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4033 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4034 U64_HI(section));
7a9b2557
VZ
4035 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4036 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4037
4038 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4039 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4040 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4041
4042 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4043}
4044
4045static void bnx2x_zero_def_sb(struct bnx2x *bp)
4046{
4047 int func = BP_FUNC(bp);
a2fbb9ea 4048
34f80b04
EG
4049 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4050 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4051 sizeof(struct ustorm_def_status_block)/4);
4052 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4053 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054 sizeof(struct cstorm_def_status_block)/4);
4055 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4056 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4057 sizeof(struct xstorm_def_status_block)/4);
4058 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4059 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4060 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4061}
4062
4063static void bnx2x_init_def_sb(struct bnx2x *bp,
4064 struct host_def_status_block *def_sb,
34f80b04 4065 dma_addr_t mapping, int sb_id)
a2fbb9ea 4066{
34f80b04
EG
4067 int port = BP_PORT(bp);
4068 int func = BP_FUNC(bp);
a2fbb9ea
ET
4069 int index, val, reg_offset;
4070 u64 section;
4071
4072 /* ATTN */
4073 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4074 atten_status_block);
34f80b04 4075 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4076
49d66772
ET
4077 bp->attn_state = 0;
4078
a2fbb9ea
ET
4079 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4080 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4081
34f80b04 4082 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4083 bp->attn_group[index].sig[0] = REG_RD(bp,
4084 reg_offset + 0x10*index);
4085 bp->attn_group[index].sig[1] = REG_RD(bp,
4086 reg_offset + 0x4 + 0x10*index);
4087 bp->attn_group[index].sig[2] = REG_RD(bp,
4088 reg_offset + 0x8 + 0x10*index);
4089 bp->attn_group[index].sig[3] = REG_RD(bp,
4090 reg_offset + 0xc + 0x10*index);
4091 }
4092
a2fbb9ea
ET
4093 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4094 HC_REG_ATTN_MSG0_ADDR_L);
4095
4096 REG_WR(bp, reg_offset, U64_LO(section));
4097 REG_WR(bp, reg_offset + 4, U64_HI(section));
4098
4099 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4100
4101 val = REG_RD(bp, reg_offset);
34f80b04 4102 val |= sb_id;
a2fbb9ea
ET
4103 REG_WR(bp, reg_offset, val);
4104
4105 /* USTORM */
4106 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4107 u_def_status_block);
34f80b04 4108 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4109
4110 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4111 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4112 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4113 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4114 U64_HI(section));
5c862848 4115 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4116 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4117
4118 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4119 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4120 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4121
4122 /* CSTORM */
4123 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4124 c_def_status_block);
34f80b04 4125 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4126
4127 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4128 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4130 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4131 U64_HI(section));
5c862848 4132 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4133 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4134
4135 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4136 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4137 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4138
4139 /* TSTORM */
4140 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4141 t_def_status_block);
34f80b04 4142 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4143
4144 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4145 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4147 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4148 U64_HI(section));
5c862848 4149 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4150 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4151
4152 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4153 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4154 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4155
4156 /* XSTORM */
4157 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4158 x_def_status_block);
34f80b04 4159 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4160
4161 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4162 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4164 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4165 U64_HI(section));
5c862848 4166 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4167 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4168
4169 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4170 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4171 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4172
bb2a0f7a 4173 bp->stats_pending = 0;
66e855f3 4174 bp->set_mac_pending = 0;
bb2a0f7a 4175
34f80b04 4176 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4177}
4178
4179static void bnx2x_update_coalesce(struct bnx2x *bp)
4180{
34f80b04 4181 int port = BP_PORT(bp);
a2fbb9ea
ET
4182 int i;
4183
4184 for_each_queue(bp, i) {
34f80b04 4185 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4186
4187 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4188 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4189 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4190 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4191 bp->rx_ticks/12);
a2fbb9ea 4192 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4193 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4194 U_SB_ETH_RX_CQ_INDEX),
4195 bp->rx_ticks ? 0 : 1);
4196 REG_WR16(bp, BAR_USTRORM_INTMEM +
4197 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4198 U_SB_ETH_RX_BD_INDEX),
34f80b04 4199 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4200
4201 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4202 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4203 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4204 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4205 bp->tx_ticks/12);
a2fbb9ea 4206 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4207 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4208 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4209 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4210 }
4211}
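/*
 * bp->rx_ticks/bp->tx_ticks hold the interrupt coalescing values
 * (microseconds, as set via ethtool).  The division by 12 suggests
 * the status-block timeout field counts in 12 usec hardware units;
 * a value of 0 disables timeout-based coalescing, which is why the
 * HC_DISABLE flag is written as (ticks ? 0 : 1) alongside it.
 */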
4212
7a9b2557
VZ
4213static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4214 struct bnx2x_fastpath *fp, int last)
4215{
4216 int i;
4217
4218 for (i = 0; i < last; i++) {
4219 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4220 struct sk_buff *skb = rx_buf->skb;
4221
4222 if (skb == NULL) {
4223 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4224 continue;
4225 }
4226
4227 if (fp->tpa_state[i] == BNX2X_TPA_START)
4228 pci_unmap_single(bp->pdev,
4229 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4230 bp->rx_buf_size,
7a9b2557
VZ
4231 PCI_DMA_FROMDEVICE);
4232
4233 dev_kfree_skb(skb);
4234 rx_buf->skb = NULL;
4235 }
4236}
4237
a2fbb9ea
ET
4238static void bnx2x_init_rx_rings(struct bnx2x *bp)
4239{
7a9b2557 4240 int func = BP_FUNC(bp);
32626230
EG
4241 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4242 ETH_MAX_AGGREGATION_QUEUES_E1H;
4243 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4244 int i, j;
a2fbb9ea 4245
437cf2f1
EG
4246 bp->rx_buf_size = bp->dev->mtu;
4247 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4248 BCM_RX_ETH_PAYLOAD_ALIGN;
a2fbb9ea 4249
7a9b2557
VZ
4250 if (bp->flags & TPA_ENABLE_FLAG) {
4251 DP(NETIF_MSG_IFUP,
437cf2f1
EG
4252 "rx_buf_size %d effective_mtu %d\n",
4253 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
7a9b2557
VZ
4254
4255 for_each_queue(bp, j) {
32626230 4256 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4257
32626230 4258 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4259 fp->tpa_pool[i].skb =
4260 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4261 if (!fp->tpa_pool[i].skb) {
4262 BNX2X_ERR("Failed to allocate TPA "
4263 "skb pool for queue[%d] - "
4264 "disabling TPA on this "
4265 "queue!\n", j);
4266 bnx2x_free_tpa_pool(bp, fp, i);
4267 fp->disable_tpa = 1;
4268 break;
4269 }
4270 pci_unmap_addr_set((struct sw_rx_bd *)
4271 &bp->fp->tpa_pool[i],
4272 mapping, 0);
4273 fp->tpa_state[i] = BNX2X_TPA_STOP;
4274 }
4275 }
4276 }
4277
a2fbb9ea
ET
4278 for_each_queue(bp, j) {
4279 struct bnx2x_fastpath *fp = &bp->fp[j];
4280
4281 fp->rx_bd_cons = 0;
4282 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4283 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4284
4285 /* "next page" elements initialization */
4286 /* SGE ring */
4287 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4288 struct eth_rx_sge *sge;
4289
4290 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4291 sge->addr_hi =
4292 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4293 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4294 sge->addr_lo =
4295 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4296 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4297 }
4298
4299 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4300
7a9b2557 4301 /* RX BD ring */
a2fbb9ea
ET
4302 for (i = 1; i <= NUM_RX_RINGS; i++) {
4303 struct eth_rx_bd *rx_bd;
4304
4305 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4306 rx_bd->addr_hi =
4307 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4308 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4309 rx_bd->addr_lo =
4310 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4311 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4312 }
4313
34f80b04 4314 /* CQ ring */
a2fbb9ea
ET
4315 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4316 struct eth_rx_cqe_next_page *nextpg;
4317
4318 nextpg = (struct eth_rx_cqe_next_page *)
4319 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4320 nextpg->addr_hi =
4321 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4322 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4323 nextpg->addr_lo =
4324 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4325 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4326 }
4327
7a9b2557
VZ
4328 /* Allocate SGEs and initialize the ring elements */
4329 for (i = 0, ring_prod = 0;
4330 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4331
7a9b2557
VZ
4332 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4333 BNX2X_ERR("was only able to allocate "
4334 "%d rx sges\n", i);
4335 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4336 /* Cleanup already allocated elements */
4337 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4338 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4339 fp->disable_tpa = 1;
4340 ring_prod = 0;
4341 break;
4342 }
4343 ring_prod = NEXT_SGE_IDX(ring_prod);
4344 }
4345 fp->rx_sge_prod = ring_prod;
4346
4347 /* Allocate BDs and initialize BD ring */
66e855f3 4348 fp->rx_comp_cons = 0;
7a9b2557 4349 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4350 for (i = 0; i < bp->rx_ring_size; i++) {
4351 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4352 BNX2X_ERR("was only able to allocate "
4353 "%d rx skbs\n", i);
66e855f3 4354 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4355 break;
4356 }
4357 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4358 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4359 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4360 }
4361
7a9b2557
VZ
4362 fp->rx_bd_prod = ring_prod;
4363 /* must not have more available CQEs than BDs */
4364 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4365 cqe_ring_prod);
a2fbb9ea
ET
4366 fp->rx_pkt = fp->rx_calls = 0;
4367
7a9b2557
VZ
4368 /* Warning!
4369 * This will generate an interrupt (to the TSTORM);
4370 * it must only be done after the chip is initialized.
4371 */
4372 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4373 fp->rx_sge_prod);
a2fbb9ea
ET
4374 if (j != 0)
4375 continue;
4376
4377 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4378 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4379 U64_LO(fp->rx_comp_mapping));
4380 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4381 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4382 U64_HI(fp->rx_comp_mapping));
4383 }
4384}
4385
4386static void bnx2x_init_tx_ring(struct bnx2x *bp)
4387{
4388 int i, j;
4389
4390 for_each_queue(bp, j) {
4391 struct bnx2x_fastpath *fp = &bp->fp[j];
4392
4393 for (i = 1; i <= NUM_TX_RINGS; i++) {
4394 struct eth_tx_bd *tx_bd =
4395 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4396
4397 tx_bd->addr_hi =
4398 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4399 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4400 tx_bd->addr_lo =
4401 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4402 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4403 }
4404
4405 fp->tx_pkt_prod = 0;
4406 fp->tx_pkt_cons = 0;
4407 fp->tx_bd_prod = 0;
4408 fp->tx_bd_cons = 0;
4409 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4410 fp->tx_pkt = 0;
4411 }
4412}
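/*
 * All of the rings initialized above are chained page lists: the last
 * entry (TX BD, RCQ) or last two entries (RX BD, SGE) of each page
 * are reserved as a "next page" pointer holding the DMA address of
 * the following page, which is why the loops write elements
 * CNT * i - 1 or CNT * i - 2.  The NEXT_*_IDX() macros skip these
 * reserved slots when producer/consumer indices are advanced.
 */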
4413
4414static void bnx2x_init_sp_ring(struct bnx2x *bp)
4415{
34f80b04 4416 int func = BP_FUNC(bp);
a2fbb9ea
ET
4417
4418 spin_lock_init(&bp->spq_lock);
4419
4420 bp->spq_left = MAX_SPQ_PENDING;
4421 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4422 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4423 bp->spq_prod_bd = bp->spq;
4424 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4425
34f80b04 4426 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4427 U64_LO(bp->spq_mapping));
34f80b04
EG
4428 REG_WR(bp,
4429 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4430 U64_HI(bp->spq_mapping));
4431
34f80b04 4432 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4433 bp->spq_prod_idx);
4434}
4435
4436static void bnx2x_init_context(struct bnx2x *bp)
4437{
4438 int i;
4439
4440 for_each_queue(bp, i) {
4441 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4442 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4443 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4444
4445 context->xstorm_st_context.tx_bd_page_base_hi =
4446 U64_HI(fp->tx_desc_mapping);
4447 context->xstorm_st_context.tx_bd_page_base_lo =
4448 U64_LO(fp->tx_desc_mapping);
4449 context->xstorm_st_context.db_data_addr_hi =
4450 U64_HI(fp->tx_prods_mapping);
4451 context->xstorm_st_context.db_data_addr_lo =
4452 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4453 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4454 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4455
4456 context->ustorm_st_context.common.sb_index_numbers =
4457 BNX2X_RX_SB_INDEX_NUM;
4458 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4459 context->ustorm_st_context.common.status_block_id = sb_id;
4460 context->ustorm_st_context.common.flags =
4461 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
437cf2f1
EG
4462 context->ustorm_st_context.common.mc_alignment_size =
4463 BCM_RX_ETH_PAYLOAD_ALIGN;
34f80b04 4464 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4465 bp->rx_buf_size;
34f80b04 4466 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4467 U64_HI(fp->rx_desc_mapping);
34f80b04 4468 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4469 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4470 if (!fp->disable_tpa) {
4471 context->ustorm_st_context.common.flags |=
4472 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4473 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4474 context->ustorm_st_context.common.sge_buff_size =
4475 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4476 context->ustorm_st_context.common.sge_page_base_hi =
4477 U64_HI(fp->rx_sge_mapping);
4478 context->ustorm_st_context.common.sge_page_base_lo =
4479 U64_LO(fp->rx_sge_mapping);
4480 }
4481
a2fbb9ea 4482 context->cstorm_st_context.sb_index_number =
5c862848 4483 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4484 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4485
4486 context->xstorm_ag_context.cdu_reserved =
4487 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4488 CDU_REGION_NUMBER_XCM_AG,
4489 ETH_CONNECTION_TYPE);
4490 context->ustorm_ag_context.cdu_usage =
4491 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4492 CDU_REGION_NUMBER_UCM_AG,
4493 ETH_CONNECTION_TYPE);
4494 }
4495}
4496
4497static void bnx2x_init_ind_table(struct bnx2x *bp)
4498{
34f80b04 4499 int port = BP_PORT(bp);
a2fbb9ea
ET
4500 int i;
4501
4502 if (!is_multi(bp))
4503 return;
4504
34f80b04 4505 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4506 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4507 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4508 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4509 i % bp->num_queues);
4510
4511 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4512}
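/*
 * Example: with four active queues the loop above fills the
 * indirection table with the repeating pattern 0 1 2 3 0 1 2 3 ...,
 * so RSS hash buckets are spread evenly across the queues
 * (table entry = bucket index modulo bp->num_queues).
 */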
4513
49d66772
ET
4514static void bnx2x_set_client_config(struct bnx2x *bp)
4515{
49d66772 4516 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4517 int port = BP_PORT(bp);
4518 int i;
49d66772 4519
34f80b04 4520 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4521 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4522 tstorm_client.config_flags =
4523 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4524#ifdef BCM_VLAN
34f80b04 4525 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4526 tstorm_client.config_flags |=
4527 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4528 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4529 }
4530#endif
49d66772 4531
7a9b2557
VZ
4532 if (bp->flags & TPA_ENABLE_FLAG) {
4533 tstorm_client.max_sges_for_packet =
4534 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4535 tstorm_client.max_sges_for_packet =
4536 ((tstorm_client.max_sges_for_packet +
4537 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4538 PAGES_PER_SGE_SHIFT;
4539
4540 tstorm_client.config_flags |=
4541 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4542 }
4543
49d66772
ET
4544 for_each_queue(bp, i) {
4545 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4546 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4547 ((u32 *)&tstorm_client)[0]);
4548 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4549 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4550 ((u32 *)&tstorm_client)[1]);
4551 }
4552
34f80b04
EG
4553 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4554 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4555}
4556
a2fbb9ea
ET
4557static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4558{
a2fbb9ea 4559 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4560 int mode = bp->rx_mode;
4561 int mask = (1 << BP_L_ID(bp));
4562 int func = BP_FUNC(bp);
a2fbb9ea
ET
4563 int i;
4564
3196a88a 4565 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4566
4567 switch (mode) {
4568 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4569 tstorm_mac_filter.ucast_drop_all = mask;
4570 tstorm_mac_filter.mcast_drop_all = mask;
4571 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4572 break;
4573 case BNX2X_RX_MODE_NORMAL:
34f80b04 4574 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4575 break;
4576 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4577 tstorm_mac_filter.mcast_accept_all = mask;
4578 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4579 break;
4580 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4581 tstorm_mac_filter.ucast_accept_all = mask;
4582 tstorm_mac_filter.mcast_accept_all = mask;
4583 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4584 break;
4585 default:
34f80b04
EG
4586 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4587 break;
a2fbb9ea
ET
4588 }
4589
4590 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4591 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4592 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4593 ((u32 *)&tstorm_mac_filter)[i]);
4594
34f80b04 4595/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4596 ((u32 *)&tstorm_mac_filter)[i]); */
4597 }
a2fbb9ea 4598
49d66772
ET
4599 if (mode != BNX2X_RX_MODE_NONE)
4600 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4601}
4602
471de716
EG
4603static void bnx2x_init_internal_common(struct bnx2x *bp)
4604{
4605 int i;
4606
3cdf1db7
YG
4607 if (bp->flags & TPA_ENABLE_FLAG) {
4608 struct tstorm_eth_tpa_exist tpa = {0};
4609
4610 tpa.tpa_exist = 1;
4611
4612 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4613 ((u32 *)&tpa)[0]);
4614 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4615 ((u32 *)&tpa)[1]);
4616 }
4617
471de716
EG
4618 /* Zero this manually as its initialization is
4619 currently missing in the initTool */
4620 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4621 REG_WR(bp, BAR_USTRORM_INTMEM +
4622 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4623}
4624
4625static void bnx2x_init_internal_port(struct bnx2x *bp)
4626{
4627 int port = BP_PORT(bp);
4628
4629 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4630 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4631 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4632 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4633}
4634
4635static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4636{
a2fbb9ea
ET
4637 struct tstorm_eth_function_common_config tstorm_config = {0};
4638 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4639 int port = BP_PORT(bp);
4640 int func = BP_FUNC(bp);
4641 int i;
471de716 4642 u16 max_agg_size;
a2fbb9ea
ET
4643
4644 if (is_multi(bp)) {
4645 tstorm_config.config_flags = MULTI_FLAGS;
4646 tstorm_config.rss_result_mask = MULTI_MASK;
4647 }
4648
34f80b04
EG
4649 tstorm_config.leading_client_id = BP_L_ID(bp);
4650
a2fbb9ea 4651 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4652 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4653 (*(u32 *)&tstorm_config));
4654
c14423fe 4655 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4656 bnx2x_set_storm_rx_mode(bp);
4657
66e855f3
YG
4658 /* reset xstorm per client statistics */
4659 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4660 REG_WR(bp, BAR_XSTRORM_INTMEM +
4661 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4662 i*4, 0);
4663 }
4664 /* reset tstorm per client statistics */
4665 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4666 REG_WR(bp, BAR_TSTRORM_INTMEM +
4667 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4668 i*4, 0);
4669 }
4670
4671 /* Init statistics related context */
34f80b04 4672 stats_flags.collect_eth = 1;
a2fbb9ea 4673
66e855f3 4674 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4675 ((u32 *)&stats_flags)[0]);
66e855f3 4676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4677 ((u32 *)&stats_flags)[1]);
4678
66e855f3 4679 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4680 ((u32 *)&stats_flags)[0]);
66e855f3 4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4682 ((u32 *)&stats_flags)[1]);
4683
66e855f3 4684 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4685 ((u32 *)&stats_flags)[0]);
66e855f3 4686 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4687 ((u32 *)&stats_flags)[1]);
4688
66e855f3
YG
4689 REG_WR(bp, BAR_XSTRORM_INTMEM +
4690 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4691 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4692 REG_WR(bp, BAR_XSTRORM_INTMEM +
4693 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4694 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4695
4696 REG_WR(bp, BAR_TSTRORM_INTMEM +
4697 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4698 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4699 REG_WR(bp, BAR_TSTRORM_INTMEM +
4700 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4701 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4702
4703 if (CHIP_IS_E1H(bp)) {
4704 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4705 IS_E1HMF(bp));
4706 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4707 IS_E1HMF(bp));
4708 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4709 IS_E1HMF(bp));
4710 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4711 IS_E1HMF(bp));
4712
7a9b2557
VZ
4713 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4714 bp->e1hov);
34f80b04
EG
4715 }
4716
471de716 4717 /* Init CQ ring mapping and aggregation size */
437cf2f1 4718 max_agg_size = min((u32)(bp->rx_buf_size +
471de716
EG
4719 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4720 (u32)0xffff);
7a9b2557
VZ
4721 for_each_queue(bp, i) {
4722 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4723
4724 REG_WR(bp, BAR_USTRORM_INTMEM +
4725 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4726 U64_LO(fp->rx_comp_mapping));
4727 REG_WR(bp, BAR_USTRORM_INTMEM +
4728 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4729 U64_HI(fp->rx_comp_mapping));
4730
7a9b2557
VZ
4731 REG_WR16(bp, BAR_USTRORM_INTMEM +
4732 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4733 max_agg_size);
4734 }
a2fbb9ea
ET
4735}
4736
471de716
EG
4737static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4738{
4739 switch (load_code) {
4740 case FW_MSG_CODE_DRV_LOAD_COMMON:
4741 bnx2x_init_internal_common(bp);
4742 /* no break */
4743
4744 case FW_MSG_CODE_DRV_LOAD_PORT:
4745 bnx2x_init_internal_port(bp);
4746 /* no break */
4747
4748 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4749 bnx2x_init_internal_func(bp);
4750 break;
4751
4752 default:
4753 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4754 break;
4755 }
4756}
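/*
 * Note the deliberate fall-through above ("no break"): a COMMON load
 * runs the common, port and function init stages, a PORT load runs
 * the port and function stages, and a FUNCTION load only the last,
 * matching how much of the chip the MCP told this driver instance
 * to bring up.
 */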
4757
4758static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4759{
4760 int i;
4761
4762 for_each_queue(bp, i) {
4763 struct bnx2x_fastpath *fp = &bp->fp[i];
4764
34f80b04 4765 fp->bp = bp;
a2fbb9ea 4766 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4767 fp->index = i;
34f80b04
EG
4768 fp->cl_id = BP_L_ID(bp) + i;
4769 fp->sb_id = fp->cl_id;
4770 DP(NETIF_MSG_IFUP,
4771 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4772 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4773 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4774 FP_SB_ID(fp));
4775 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4776 }
4777
5c862848
EG
4778 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4779 DEF_SB_ID);
4780 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4781 bnx2x_update_coalesce(bp);
4782 bnx2x_init_rx_rings(bp);
4783 bnx2x_init_tx_ring(bp);
4784 bnx2x_init_sp_ring(bp);
4785 bnx2x_init_context(bp);
471de716 4786 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4787 bnx2x_init_ind_table(bp);
615f8fd9 4788 bnx2x_int_enable(bp);
a2fbb9ea
ET
4789}
4790
4791/* end of nic init */
4792
4793/*
4794 * gzip service functions
4795 */
4796
4797static int bnx2x_gunzip_init(struct bnx2x *bp)
4798{
4799 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4800 &bp->gunzip_mapping);
4801 if (bp->gunzip_buf == NULL)
4802 goto gunzip_nomem1;
4803
4804 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4805 if (bp->strm == NULL)
4806 goto gunzip_nomem2;
4807
4808 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4809 GFP_KERNEL);
4810 if (bp->strm->workspace == NULL)
4811 goto gunzip_nomem3;
4812
4813 return 0;
4814
4815gunzip_nomem3:
4816 kfree(bp->strm);
4817 bp->strm = NULL;
4818
4819gunzip_nomem2:
4820 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4821 bp->gunzip_mapping);
4822 bp->gunzip_buf = NULL;
4823
4824gunzip_nomem1:
4825 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4826 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4827 return -ENOMEM;
4828}
4829
4830static void bnx2x_gunzip_end(struct bnx2x *bp)
4831{
4832 kfree(bp->strm->workspace);
4833
4834 kfree(bp->strm);
4835 bp->strm = NULL;
4836
4837 if (bp->gunzip_buf) {
4838 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4839 bp->gunzip_mapping);
4840 bp->gunzip_buf = NULL;
4841 }
4842}
4843
4844static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4845{
4846 int n, rc;
4847
4848 /* check gzip header */
4849 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4850 return -EINVAL;
4851
4852 n = 10;
4853
34f80b04 4854#define FNAME 0x8
a2fbb9ea
ET
4855
4856 if (zbuf[3] & FNAME)
4857 while ((zbuf[n++] != 0) && (n < len));
4858
4859 bp->strm->next_in = zbuf + n;
4860 bp->strm->avail_in = len - n;
4861 bp->strm->next_out = bp->gunzip_buf;
4862 bp->strm->avail_out = FW_BUF_SIZE;
4863
4864 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4865 if (rc != Z_OK)
4866 return rc;
4867
4868 rc = zlib_inflate(bp->strm, Z_FINISH);
4869 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4870 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4871 bp->dev->name, bp->strm->msg);
4872
4873 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4874 if (bp->gunzip_outlen & 0x3)
4875 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4876 " gunzip_outlen (%d) not aligned\n",
4877 bp->dev->name, bp->gunzip_outlen);
4878 bp->gunzip_outlen >>= 2;
4879
4880 zlib_inflateEnd(bp->strm);
4881
4882 if (rc == Z_STREAM_END)
4883 return 0;
4884
4885 return rc;
4886}
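/*
 * bnx2x_gunzip() expects a standard gzip stream: the first bytes must
 * be the gzip magic 0x1f 0x8b followed by the deflate method id, the
 * fixed header is 10 bytes, and when the FNAME flag (0x8 in zbuf[3])
 * is set a NUL-terminated original file name precedes the compressed
 * data.  Passing -MAX_WBITS to zlib_inflateInit2() makes zlib consume
 * the raw deflate stream without a zlib wrapper, since the gzip
 * framing has already been skipped by hand.
 */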
4887
4888/* nic load/unload */
4889
4890/*
34f80b04 4891 * General service functions
a2fbb9ea
ET
4892 */
4893
4894/* send a NIG loopback debug packet */
4895static void bnx2x_lb_pckt(struct bnx2x *bp)
4896{
a2fbb9ea 4897 u32 wb_write[3];
a2fbb9ea
ET
4898
4899 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4900 wb_write[0] = 0x55555555;
4901 wb_write[1] = 0x55555555;
34f80b04 4902 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4903 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4904
4905 /* NON-IP protocol */
a2fbb9ea
ET
4906 wb_write[0] = 0x09000000;
4907 wb_write[1] = 0x55555555;
34f80b04 4908 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4909 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4910}
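/*
 * Each bnx2x_lb_pckt() call injects one minimal frame: the first
 * 3-word write carries the start of the packet (control word 0x20 =
 * SOP) and the second carries its end (0x10 = EOP), so the NIG
 * debug-packet interface sees a single frame of size 0x10 per call,
 * which is the size the memory test below polls for.
 */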
4911
4912/* Some of the internal memories
4913 * are not directly readable from the driver,
4914 * so to test them we send debug packets.
4915 */
4916static int bnx2x_int_mem_test(struct bnx2x *bp)
4917{
4918 int factor;
4919 int count, i;
4920 u32 val = 0;
4921
ad8d3948 4922 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4923 factor = 120;
ad8d3948
EG
4924 else if (CHIP_REV_IS_EMUL(bp))
4925 factor = 200;
4926 else
a2fbb9ea 4927 factor = 1;
a2fbb9ea
ET
4928
4929 DP(NETIF_MSG_HW, "start part1\n");
4930
4931 /* Disable inputs of parser neighbor blocks */
4932 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4933 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4934 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4935 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4936
4937 /* Write 0 to parser credits for CFC search request */
4938 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4939
4940 /* send Ethernet packet */
4941 bnx2x_lb_pckt(bp);
4942
4943 /* TODO: do we need to reset the NIG statistics? */
4944 /* Wait until NIG register shows 1 packet of size 0x10 */
4945 count = 1000 * factor;
4946 while (count) {
34f80b04 4947
a2fbb9ea
ET
4948 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4949 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4950 if (val == 0x10)
4951 break;
4952
4953 msleep(10);
4954 count--;
4955 }
4956 if (val != 0x10) {
4957 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4958 return -1;
4959 }
4960
4961 /* Wait until PRS register shows 1 packet */
4962 count = 1000 * factor;
4963 while (count) {
4964 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4965 if (val == 1)
4966 break;
4967
4968 msleep(10);
4969 count--;
4970 }
4971 if (val != 0x1) {
4972 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4973 return -2;
4974 }
4975
4976 /* Reset and init BRB, PRS */
34f80b04 4977 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4978 msleep(50);
34f80b04 4979 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4980 msleep(50);
4981 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4982 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4983
4984 DP(NETIF_MSG_HW, "part2\n");
4985
4986 /* Disable inputs of parser neighbor blocks */
4987 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4990 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4991
4992 /* Write 0 to parser credits for CFC search request */
4993 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994
4995 /* send 10 Ethernet packets */
4996 for (i = 0; i < 10; i++)
4997 bnx2x_lb_pckt(bp);
4998
4999 /* Wait until NIG register shows 10 + 1
5000 packets of size 11*0x10 = 0xb0 */
5001 count = 1000 * factor;
5002 while (count) {
34f80b04 5003
a2fbb9ea
ET
5004 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5005 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5006 if (val == 0xb0)
5007 break;
5008
5009 msleep(10);
5010 count--;
5011 }
5012 if (val != 0xb0) {
5013 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5014 return -3;
5015 }
5016
5017 /* Wait until PRS register shows 2 packets */
5018 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5019 if (val != 2)
5020 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5021
5022 /* Write 1 to parser credits for CFC search request */
5023 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5024
5025 /* Wait until PRS register shows 3 packets */
5026 msleep(10 * factor);
5028 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5029 if (val != 3)
5030 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5031
5032 /* clear NIG EOP FIFO */
5033 for (i = 0; i < 11; i++)
5034 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5035 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5036 if (val != 1) {
5037 BNX2X_ERR("clear of NIG failed\n");
5038 return -4;
5039 }
5040
5041 /* Reset and init BRB, PRS, NIG */
5042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5043 msleep(50);
5044 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5045 msleep(50);
5046 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5047 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5048#ifndef BCM_ISCSI
5049 /* set NIC mode */
5050 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5051#endif
5052
5053 /* Enable inputs of parser neighbor blocks */
5054 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5055 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5056 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5057 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5058
5059 DP(NETIF_MSG_HW, "done\n");
5060
5061 return 0; /* OK */
5062}
5063
5064static void enable_blocks_attention(struct bnx2x *bp)
5065{
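	/* writing 0 to a block's *_INT_MASK register unmasks every
	 * attention source in that block; the writes left commented out
	 * below keep the SEM and MISC blocks masked */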
5066 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5067 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5068 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5069 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5070 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5071 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5072 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5073 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5074 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5075/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5076/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5077 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5078 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5079 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5080/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5081/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5082 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5083 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5084 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5085 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5086/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5087/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5088 if (CHIP_REV_IS_FPGA(bp))
5089 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5090 else
5091 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5092 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5093 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5094 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5095/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5096/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5097 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5098 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5099/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5100 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5101}
5102
5103
5104static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5105{
a2fbb9ea 5106 u32 val, i;
a2fbb9ea 5107
34f80b04 5108 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5109
5110 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5111 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5112
5113 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5114 if (CHIP_IS_E1H(bp))
5115 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5116
5117 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5118 msleep(30);
5119 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5120
5121 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5122 if (CHIP_IS_E1(bp)) {
5123 /* enable HW interrupt from PXP on USDM overflow
5124 bit 16 on INT_MASK_0 */
5125 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5126 }
a2fbb9ea 5127
5128 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5129 bnx2x_init_pxp(bp);
5130
5131#ifdef __BIG_ENDIAN
5132 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5133 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5134 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5135 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5138
5139/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5140 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5141 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5142 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5143 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5144#endif
5145
34f80b04 5146 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5147#ifdef BCM_ISCSI
5148 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5149 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5150 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5151#endif
5152
5153 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5154 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5155
5156 /* let the HW do its magic ... */
5157 msleep(100);
5158 /* finish PXP init */
5159 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5160 if (val != 1) {
5161 BNX2X_ERR("PXP2 CFG failed\n");
5162 return -EBUSY;
5163 }
5164 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5165 if (val != 1) {
5166 BNX2X_ERR("PXP2 RD_INIT failed\n");
5167 return -EBUSY;
5168 }
a2fbb9ea 5169
5170 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5171 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5172
34f80b04 5173 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5174
5175 /* clean the DMAE memory */
5176 bp->dmae_ready = 1;
5177 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5178
5179 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5180 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5181 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5182 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5183
5184 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5185 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5186 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5187 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5188
5189 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5190 /* soft reset pulse */
5191 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5192 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5193
5194#ifdef BCM_ISCSI
34f80b04 5195 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5196#endif
a2fbb9ea 5197
5198 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5199 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5200 if (!CHIP_REV_IS_SLOW(bp)) {
5201 /* enable hw interrupt from doorbell Q */
5202 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5203 }
a2fbb9ea 5204
5205 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5206 if (CHIP_REV_IS_SLOW(bp)) {
5207 /* fix for emulation and FPGA for no pause */
5208 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5209 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5210 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5211 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5212 }
a2fbb9ea 5213
34f80b04 5214 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5215 /* set NIC mode */
5216 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5217 if (CHIP_IS_E1H(bp))
5218 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5219
5220 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5221 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5222 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5223 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5224
5225 if (CHIP_IS_E1H(bp)) {
5226 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5227 STORM_INTMEM_SIZE_E1H/2);
5228 bnx2x_init_fill(bp,
5229 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5230 0, STORM_INTMEM_SIZE_E1H/2);
5231 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5232 STORM_INTMEM_SIZE_E1H/2);
5233 bnx2x_init_fill(bp,
5234 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5235 0, STORM_INTMEM_SIZE_E1H/2);
5236 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5237 STORM_INTMEM_SIZE_E1H/2);
5238 bnx2x_init_fill(bp,
5239 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5240 0, STORM_INTMEM_SIZE_E1H/2);
5241 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5242 STORM_INTMEM_SIZE_E1H/2);
5243 bnx2x_init_fill(bp,
5244 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5245 0, STORM_INTMEM_SIZE_E1H/2);
5246 } else { /* E1 */
5247 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5248 STORM_INTMEM_SIZE_E1);
5249 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5250 STORM_INTMEM_SIZE_E1);
5251 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5252 STORM_INTMEM_SIZE_E1);
5253 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5254 STORM_INTMEM_SIZE_E1);
34f80b04 5255 }
a2fbb9ea 5256
5257 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5258 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5259 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5260 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5261
5262 /* sync semi rtc */
5263 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5264 0x80000000);
5265 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5266 0x80000000);
a2fbb9ea 5267
5268 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5269 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5270 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5271
5272 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5273 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5274 REG_WR(bp, i, 0xc0cac01a);
5275 /* TODO: replace with something meaningful */
5276 }
5277 if (CHIP_IS_E1H(bp))
5278 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5279 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5280
5281 if (sizeof(union cdu_context) != 1024)
5282 /* we currently assume that a context is 1024 bytes */
5283 printk(KERN_ALERT PFX "please adjust the size of"
5284 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5285
5286 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5287 val = (4 << 24) + (0 << 12) + 1024;
5288 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5289 if (CHIP_IS_E1(bp)) {
5290 /* !!! fix pxp client credit until excel update */
5291 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5292 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5293 }
a2fbb9ea 5294
5295 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5296 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5297
5298 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5299 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5300
5301 /* PXPCS COMMON comes here */
5302 /* Reset PCIE errors for debug */
5303 REG_WR(bp, 0x2814, 0xffffffff);
5304 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5305
5306 /* EMAC0 COMMON comes here */
5307 /* EMAC1 COMMON comes here */
5308 /* DBU COMMON comes here */
5309 /* DBG COMMON comes here */
5310
5311 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5312 if (CHIP_IS_E1H(bp)) {
5313 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5314 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5315 }
5316
5317 if (CHIP_REV_IS_SLOW(bp))
5318 msleep(200);
5319
5320 /* finish CFC init */
5321 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5322 if (val != 1) {
5323 BNX2X_ERR("CFC LL_INIT failed\n");
5324 return -EBUSY;
5325 }
5326 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5327 if (val != 1) {
5328 BNX2X_ERR("CFC AC_INIT failed\n");
5329 return -EBUSY;
5330 }
5331 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5332 if (val != 1) {
5333 BNX2X_ERR("CFC CAM_INIT failed\n");
5334 return -EBUSY;
5335 }
5336 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5337
5338 /* read NIG statistic
5339 to see if this is our first up since powerup */
5340 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5341 val = *bnx2x_sp(bp, wb_data[0]);
5342
5343 /* do internal memory self test */
5344 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5345 BNX2X_ERR("internal mem self test failed\n");
5346 return -EBUSY;
5347 }
5348
5349 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5350 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5351 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5352 /* Fan failure is indicated by SPIO 5 */
5353 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5354 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5355
5356 /* set to active low mode */
5357 val = REG_RD(bp, MISC_REG_SPIO_INT);
5358 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5359 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5360 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5361
5362 /* enable interrupt to signal the IGU */
5363 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5364 val |= (1 << MISC_REGISTERS_SPIO_5);
5365 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5366 break;
f1410647 5367
5368 default:
5369 break;
5370 }
f1410647 5371
5372 /* clear PXP2 attentions */
5373 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5374
34f80b04 5375 enable_blocks_attention(bp);
a2fbb9ea 5376
5377 if (!BP_NOMCP(bp)) {
5378 bnx2x_acquire_phy_lock(bp);
5379 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5380 bnx2x_release_phy_lock(bp);
5381 } else
5382 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5383
5384 return 0;
5385}
a2fbb9ea 5386
5387static int bnx2x_init_port(struct bnx2x *bp)
5388{
5389 int port = BP_PORT(bp);
5390 u32 val;
a2fbb9ea 5391
5392 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5393
5394 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5395
5396 /* Port PXP comes here */
5397 /* Port PXP2 comes here */
5398#ifdef BCM_ISCSI
5399 /* Port0 1
5400 * Port1 385 */
5401 i++;
5402 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5403 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5404 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5405 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5406
5407 /* Port0 2
5408 * Port1 386 */
5409 i++;
5410 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5411 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5412 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5413 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5414
5415 /* Port0 3
5416 * Port1 387 */
5417 i++;
5418 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5419 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5420 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5421 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5422#endif
34f80b04 5423 /* Port CMs come here */
5424
5425 /* Port QM comes here */
5426#ifdef BCM_ISCSI
5427 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5428 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5429
5430 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5431 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5432#endif
5433 /* Port DQ comes here */
5434 /* Port BRB1 comes here */
ad8d3948 5435 /* Port PRS comes here */
5436 /* Port TSDM comes here */
5437 /* Port CSDM comes here */
5438 /* Port USDM comes here */
5439 /* Port XSDM comes here */
5440 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5441 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5442 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5443 port ? USEM_PORT1_END : USEM_PORT0_END);
5444 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5445 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5446 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5447 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5448 /* Port UPB comes here */
5449 /* Port XPB comes here */
5450
5451 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5452 port ? PBF_PORT1_END : PBF_PORT0_END);
5453
5454 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5455 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5456
5457 /* update threshold */
34f80b04 5458 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5459 /* update init credit */
34f80b04 5460 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
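	/* (PBF values are in 16-byte units: 9040/16 = 565 units cover a
	 * 9000-byte MTU plus margin; init credit = 565 + 553 - 22) */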
5461
5462 /* probe changes */
34f80b04 5463 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5464 msleep(5);
34f80b04 5465 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5466
5467#ifdef BCM_ISCSI
5468 /* tell the searcher where the T2 table is */
5469 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5470
5471 wb_write[0] = U64_LO(bp->t2_mapping);
5472 wb_write[1] = U64_HI(bp->t2_mapping);
5473 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5474 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5475 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5476 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5477
5478 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5479 /* Port SRCH comes here */
5480#endif
5481 /* Port CDU comes here */
5482 /* Port CFC comes here */
5483
5484 if (CHIP_IS_E1(bp)) {
5485 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5486 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5487 }
5488 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5489 port ? HC_PORT1_END : HC_PORT0_END);
5490
5491 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5492 MISC_AEU_PORT0_START,
5493 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5494 /* init aeu_mask_attn_func_0/1:
5495 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5496 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5497 * bits 4-7 are used for "per vn group attention" */
5498 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5499 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5500
5501 /* Port PXPCS comes here */
5502 /* Port EMAC0 comes here */
5503 /* Port EMAC1 comes here */
5504 /* Port DBU comes here */
5505 /* Port DBG comes here */
5506 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5507 port ? NIG_PORT1_END : NIG_PORT0_END);
5508
5509 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5510
5511 if (CHIP_IS_E1H(bp)) {
5512 u32 wsum;
5513 struct cmng_struct_per_port m_cmng_port;
5514 int vn;
5515
5516 /* 0x2 disable e1hov, 0x1 enable */
5517 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5518 (IS_E1HMF(bp) ? 0x1 : 0x2));
5519
5520 /* Init RATE SHAPING and FAIRNESS contexts.
5521 Initialize as if there is 10G link. */
5522 wsum = bnx2x_calc_vn_wsum(bp);
5523 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5524 if (IS_E1HMF(bp))
5525 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5526 bnx2x_init_vn_minmax(bp, 2*vn + port,
5527 wsum, 10000, &m_cmng_port);
5528 }
5529
5530 /* Port MCP comes here */
5531 /* Port DMAE comes here */
5532
34f80b04 5533 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5534 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5535 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5536 /* add SPIO 5 to group 0 */
5537 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5538 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5539 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5540 break;
5541
5542 default:
5543 break;
5544 }
5545
c18487ee 5546 bnx2x__link_reset(bp);
a2fbb9ea 5547
5548 return 0;
5549}
5550
5551#define ILT_PER_FUNC (768/2)
5552#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5553/* the phys address is shifted right 12 bits and a 1=valid bit is
5554   added as the 53rd bit (bit 52);
5555   since this is a wide register(TM)
5556   we split it into two 32 bit writes
5557 */
5558#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5559#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5560#define PXP_ONE_ILT(x) (((x) << 10) | x)
5561#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
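/* e.g. for a DMA address of 0x1_2345_6000, ONCHIP_ADDR1() yields
   0x00123456 (bits 43:12) and ONCHIP_ADDR2() yields 0x00100000 (the
   valid bit above bits 63:44); PXP_ONE_ILT(x) packs the same index x
   into both the first (bits 9:0) and last (bits 19:10) fields of a
   one-line ILT range */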
5562
5563#define CNIC_ILT_LINES 0
5564
5565static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5566{
5567 int reg;
5568
5569 if (CHIP_IS_E1H(bp))
5570 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5571 else /* E1 */
5572 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5573
5574 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5575}
5576
5577static int bnx2x_init_func(struct bnx2x *bp)
5578{
5579 int port = BP_PORT(bp);
5580 int func = BP_FUNC(bp);
5581 int i;
5582
5583 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5584
5585 i = FUNC_ILT_BASE(func);
5586
5587 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5588 if (CHIP_IS_E1H(bp)) {
5589 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5590 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5591 } else /* E1 */
5592 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5593 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5594
5595
5596 if (CHIP_IS_E1H(bp)) {
5597 for (i = 0; i < 9; i++)
5598 bnx2x_init_block(bp,
5599 cm_start[func][i], cm_end[func][i]);
5600
5601 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5602 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5603 }
5604
5605 /* HC init per function */
5606 if (CHIP_IS_E1H(bp)) {
5607 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5608
5609 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5610 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5611 }
5612 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5613
5614 if (CHIP_IS_E1H(bp))
5615 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5616
c14423fe 5617 /* Reset PCIE errors for debug */
5618 REG_WR(bp, 0x2114, 0xffffffff);
5619 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5620
5621 return 0;
5622}
5623
5624static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5625{
5626 int i, rc = 0;
a2fbb9ea 5627
5628 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5629 BP_FUNC(bp), load_code);
a2fbb9ea 5630
5631 bp->dmae_ready = 0;
5632 mutex_init(&bp->dmae_mutex);
5633 bnx2x_gunzip_init(bp);
a2fbb9ea 5634
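	/* the cases below fall through on purpose: a COMMON load also
	 * runs the PORT and FUNCTION init stages, and a PORT load the
	 * FUNCTION stage */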
5635 switch (load_code) {
5636 case FW_MSG_CODE_DRV_LOAD_COMMON:
5637 rc = bnx2x_init_common(bp);
5638 if (rc)
5639 goto init_hw_err;
5640 /* no break */
5641
5642 case FW_MSG_CODE_DRV_LOAD_PORT:
5643 bp->dmae_ready = 1;
5644 rc = bnx2x_init_port(bp);
5645 if (rc)
5646 goto init_hw_err;
5647 /* no break */
5648
5649 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5650 bp->dmae_ready = 1;
5651 rc = bnx2x_init_func(bp);
5652 if (rc)
5653 goto init_hw_err;
5654 break;
5655
5656 default:
5657 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5658 break;
5659 }
5660
5661 if (!BP_NOMCP(bp)) {
5662 int func = BP_FUNC(bp);
5663
5664 bp->fw_drv_pulse_wr_seq =
34f80b04 5665 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5666 DRV_PULSE_SEQ_MASK);
5667 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5668 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5669 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5670 } else
5671 bp->func_stx = 0;
a2fbb9ea 5672
5673 /* this needs to be done before gunzip end */
5674 bnx2x_zero_def_sb(bp);
5675 for_each_queue(bp, i)
5676 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5677
5678init_hw_err:
5679 bnx2x_gunzip_end(bp);
5680
5681 return rc;
5682}
5683
c14423fe 5684/* send the MCP a request, block until there is a reply */
5685static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5686{
34f80b04 5687 int func = BP_FUNC(bp);
5688 u32 seq = ++bp->fw_seq;
5689 u32 rc = 0;
5690 u32 cnt = 1;
5691 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5692
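	/* handshake: (command | seq) is written to the driver mailbox;
	 * the MCP echoes the same sequence number in the firmware
	 * mailbox header once the command has been served, so polling
	 * for a matching seq below doubles as the acknowledgment */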
34f80b04 5693 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5694 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5695
5696 do {
5697 /* let the FW do its magic ... */
5698 msleep(delay);
a2fbb9ea 5699
19680c48 5700 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5701
5702 /* Give the FW up to 2 seconds (200 * 10ms) */
5703 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5704
5705 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5706 cnt*delay, rc, seq);
5707
5708 /* is this a reply to our command? */
5709 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5710 rc &= FW_MSG_CODE_MASK;
f1410647 5711
5712 } else {
5713 /* FW BUG! */
5714 BNX2X_ERR("FW failed to respond!\n");
5715 bnx2x_fw_dump(bp);
5716 rc = 0;
5717 }
f1410647 5718
5719 return rc;
5720}
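/* Typical call site (cf. bnx2x_nic_load() below):
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code) {
 *		BNX2X_ERR("MCP response failure, aborting\n");
 *		return -EBUSY;
 *	}
 */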
5721
5722static void bnx2x_free_mem(struct bnx2x *bp)
5723{
5724
5725#define BNX2X_PCI_FREE(x, y, size) \
5726 do { \
5727 if (x) { \
5728 pci_free_consistent(bp->pdev, size, x, y); \
5729 x = NULL; \
5730 y = 0; \
5731 } \
5732 } while (0)
5733
5734#define BNX2X_FREE(x) \
5735 do { \
5736 if (x) { \
5737 vfree(x); \
5738 x = NULL; \
5739 } \
5740 } while (0)
5741
5742 int i;
5743
5744 /* fastpath */
5745 for_each_queue(bp, i) {
5746
5747 /* Status blocks */
5748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5749 bnx2x_fp(bp, i, status_blk_mapping),
5750 sizeof(struct host_status_block) +
5751 sizeof(struct eth_tx_db_data));
5752
5753 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5754 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5755 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5756 bnx2x_fp(bp, i, tx_desc_mapping),
5757 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5758
5759 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5760 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5761 bnx2x_fp(bp, i, rx_desc_mapping),
5762 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5763
5764 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5765 bnx2x_fp(bp, i, rx_comp_mapping),
5766 sizeof(struct eth_fast_path_rx_cqe) *
5767 NUM_RCQ_BD);
a2fbb9ea 5768
7a9b2557 5769 /* SGE ring */
32626230 5770 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5771 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5772 bnx2x_fp(bp, i, rx_sge_mapping),
5773 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5774 }
5775 /* end of fastpath */
5776
5777 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5778 sizeof(struct host_def_status_block));
5779
5780 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5781 sizeof(struct bnx2x_slowpath));
5782
5783#ifdef BCM_ISCSI
5784 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5785 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5786 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5787 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5788#endif
7a9b2557 5789 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5790
5791#undef BNX2X_PCI_FREE
5792#undef BNX2X_FREE
5793}
5794
5795static int bnx2x_alloc_mem(struct bnx2x *bp)
5796{
5797
5798#define BNX2X_PCI_ALLOC(x, y, size) \
5799 do { \
5800 x = pci_alloc_consistent(bp->pdev, size, y); \
5801 if (x == NULL) \
5802 goto alloc_mem_err; \
5803 memset(x, 0, size); \
5804 } while (0)
5805
5806#define BNX2X_ALLOC(x, size) \
5807 do { \
5808 x = vmalloc(size); \
5809 if (x == NULL) \
5810 goto alloc_mem_err; \
5811 memset(x, 0, size); \
5812 } while (0)
5813
5814 int i;
5815
5816 /* fastpath */
5817 for_each_queue(bp, i) {
5818 bnx2x_fp(bp, i, bp) = bp;
5819
5820 /* Status blocks */
5821 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5822 &bnx2x_fp(bp, i, status_blk_mapping),
5823 sizeof(struct host_status_block) +
5824 sizeof(struct eth_tx_db_data));
5825
5826 bnx2x_fp(bp, i, hw_tx_prods) =
5827 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5828
5829 bnx2x_fp(bp, i, tx_prods_mapping) =
5830 bnx2x_fp(bp, i, status_blk_mapping) +
5831 sizeof(struct host_status_block);
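		/* (the Tx doorbell data area lives in the same DMA
		 * allocation, directly after the status block - hence
		 * the extra eth_tx_db_data in the size above) */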
5832
5833 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5834 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5835 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5836 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5837 &bnx2x_fp(bp, i, tx_desc_mapping),
5838 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5839
5840 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5841 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5842 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5843 &bnx2x_fp(bp, i, rx_desc_mapping),
5844 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5845
5846 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5847 &bnx2x_fp(bp, i, rx_comp_mapping),
5848 sizeof(struct eth_fast_path_rx_cqe) *
5849 NUM_RCQ_BD);
5850
5851 /* SGE ring */
5852 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5853 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5854 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5855 &bnx2x_fp(bp, i, rx_sge_mapping),
5856 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5857 }
5858 /* end of fastpath */
5859
5860 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5861 sizeof(struct host_def_status_block));
5862
5863 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5864 sizeof(struct bnx2x_slowpath));
5865
5866#ifdef BCM_ISCSI
5867 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5868
5869 /* Initialize T1 */
5870 for (i = 0; i < 64*1024; i += 64) {
5871 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5872 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5873 }
5874
5875 /* allocate searcher T2 table
5876 we allocate 1/4 of alloc num for T2
5877 (which is not entered into the ILT) */
5878 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5879
5880 /* Initialize T2 */
5881 for (i = 0; i < 16*1024; i += 64)
5882 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5883
c14423fe 5884 /* now fixup the last line in the block to point to the next block */
5885 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5886
5887 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5888 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5889
5890 /* QM queues (128*MAX_CONN) */
5891 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5892#endif
5893
5894 /* Slow path ring */
5895 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5896
5897 return 0;
5898
5899alloc_mem_err:
5900 bnx2x_free_mem(bp);
5901 return -ENOMEM;
5902
5903#undef BNX2X_PCI_ALLOC
5904#undef BNX2X_ALLOC
5905}
5906
5907static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5908{
5909 int i;
5910
5911 for_each_queue(bp, i) {
5912 struct bnx2x_fastpath *fp = &bp->fp[i];
5913
5914 u16 bd_cons = fp->tx_bd_cons;
5915 u16 sw_prod = fp->tx_pkt_prod;
5916 u16 sw_cons = fp->tx_pkt_cons;
5917
5918 while (sw_cons != sw_prod) {
5919 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5920 sw_cons++;
5921 }
5922 }
5923}
5924
5925static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5926{
5927 int i, j;
5928
5929 for_each_queue(bp, j) {
5930 struct bnx2x_fastpath *fp = &bp->fp[j];
5931
5932 for (i = 0; i < NUM_RX_BD; i++) {
5933 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5934 struct sk_buff *skb = rx_buf->skb;
5935
5936 if (skb == NULL)
5937 continue;
5938
5939 pci_unmap_single(bp->pdev,
5940 pci_unmap_addr(rx_buf, mapping),
437cf2f1 5941 bp->rx_buf_size,
5942 PCI_DMA_FROMDEVICE);
5943
5944 rx_buf->skb = NULL;
5945 dev_kfree_skb(skb);
5946 }
7a9b2557 5947 if (!fp->disable_tpa)
5948 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5949 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5950 ETH_MAX_AGGREGATION_QUEUES_E1H);
5951 }
5952}
5953
5954static void bnx2x_free_skbs(struct bnx2x *bp)
5955{
5956 bnx2x_free_tx_skbs(bp);
5957 bnx2x_free_rx_skbs(bp);
5958}
5959
5960static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5961{
34f80b04 5962 int i, offset = 1;
5963
5964 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5965 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5966 bp->msix_table[0].vector);
5967
5968 for_each_queue(bp, i) {
c14423fe 5969 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5970 "state %x\n", i, bp->msix_table[i + offset].vector,
5971 bnx2x_fp(bp, i, state));
5972
5973 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5974 BNX2X_ERR("IRQ of fp #%d being freed while "
5975 "state != closed\n", i);
a2fbb9ea 5976
34f80b04 5977 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5978 }
5979}
5980
5981static void bnx2x_free_irq(struct bnx2x *bp)
5982{
a2fbb9ea 5983 if (bp->flags & USING_MSIX_FLAG) {
5984 bnx2x_free_msix_irqs(bp);
5985 pci_disable_msix(bp->pdev);
5986 bp->flags &= ~USING_MSIX_FLAG;
5987
5988 } else
5989 free_irq(bp->pdev->irq, bp->dev);
5990}
5991
5992static int bnx2x_enable_msix(struct bnx2x *bp)
5993{
34f80b04 5994 int i, rc, offset;
5995
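	/* vector layout: entry 0 is the slowpath (default) status
	 * block, entries 1..num_queues carry the fastpath queues on
	 * IGU vectors derived from BP_L_ID() */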
5996 bp->msix_table[0].entry = 0;
5997 offset = 1;
5998 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5999
6000 for_each_queue(bp, i) {
6001 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6002
6003 bp->msix_table[i + offset].entry = igu_vec;
6004 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6005 "(fastpath #%u)\n", i + offset, igu_vec, i);
6006 }
6007
6008 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6009 bp->num_queues + offset);
6010 if (rc) {
6011 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6012 return -1;
6013 }
6014 bp->flags |= USING_MSIX_FLAG;
6015
6016 return 0;
6017}
6018
6019static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6020{
34f80b04 6021 int i, rc, offset = 1;
a2fbb9ea 6022
6023 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6024 bp->dev->name, bp->dev);
6025 if (rc) {
6026 BNX2X_ERR("request sp irq failed\n");
6027 return -EBUSY;
6028 }
6029
6030 for_each_queue(bp, i) {
34f80b04 6031 rc = request_irq(bp->msix_table[i + offset].vector,
6032 bnx2x_msix_fp_int, 0,
6033 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6034 if (rc) {
6035 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6036 i + offset, -rc);
6037 bnx2x_free_msix_irqs(bp);
6038 return -EBUSY;
6039 }
6040
6041 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6042 }
6043
6044 return 0;
6045}
6046
6047static int bnx2x_req_irq(struct bnx2x *bp)
6048{
34f80b04 6049 int rc;
a2fbb9ea 6050
6051 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6052 bp->dev->name, bp->dev);
6053 if (!rc)
6054 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6055
6056 return rc;
6057}
6058
6059static void bnx2x_napi_enable(struct bnx2x *bp)
6060{
6061 int i;
6062
6063 for_each_queue(bp, i)
6064 napi_enable(&bnx2x_fp(bp, i, napi));
6065}
6066
6067static void bnx2x_napi_disable(struct bnx2x *bp)
6068{
6069 int i;
6070
6071 for_each_queue(bp, i)
6072 napi_disable(&bnx2x_fp(bp, i, napi));
6073}
6074
6075static void bnx2x_netif_start(struct bnx2x *bp)
6076{
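	/* intr_sem counts interrupt-disable requests; only the call
	 * that drops it to zero (atomic_dec_and_test) may actually
	 * re-enable the Tx queue, NAPI and HW interrupts */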
6077 if (atomic_dec_and_test(&bp->intr_sem)) {
6078 if (netif_running(bp->dev)) {
6079 if (bp->state == BNX2X_STATE_OPEN)
6080 netif_wake_queue(bp->dev);
6081 bnx2x_napi_enable(bp);
6082 bnx2x_int_enable(bp);
6083 }
6084 }
6085}
6086
f8ef6e44 6087static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6088{
f8ef6e44 6089 bnx2x_int_disable_sync(bp, disable_hw);
65abd74d
YG
6090 if (netif_running(bp->dev)) {
6091 bnx2x_napi_disable(bp);
6092 netif_tx_disable(bp->dev);
6093 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6094 }
6095}
6096
6097/*
6098 * Init service functions
6099 */
6100
3101c2bc 6101static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6102{
6103 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6104 int port = BP_PORT(bp);
6105
6106 /* CAM allocation
6107 * unicasts 0-31:port0 32-63:port1
6108 * multicast 64-127:port0 128-191:port1
6109 */
6110 config->hdr.length_6b = 2;
6111 config->hdr.offset = port ? 31 : 0;
6112 config->hdr.client_id = BP_CL_ID(bp);
6113 config->hdr.reserved1 = 0;
6114
6115 /* primary MAC */
6116 config->config_table[0].cam_entry.msb_mac_addr =
6117 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6118 config->config_table[0].cam_entry.middle_mac_addr =
6119 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6120 config->config_table[0].cam_entry.lsb_mac_addr =
6121 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6122 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6123 if (set)
6124 config->config_table[0].target_table_entry.flags = 0;
6125 else
6126 CAM_INVALIDATE(config->config_table[0]);
6127 config->config_table[0].target_table_entry.client_id = 0;
6128 config->config_table[0].target_table_entry.vlan_id = 0;
6129
6130 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6131 (set ? "setting" : "clearing"),
6132 config->config_table[0].cam_entry.msb_mac_addr,
6133 config->config_table[0].cam_entry.middle_mac_addr,
6134 config->config_table[0].cam_entry.lsb_mac_addr);
6135
6136 /* broadcast */
6137 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6138 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6139 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6140 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6141 if (set)
6142 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6143 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6144 else
6145 CAM_INVALIDATE(config->config_table[1]);
6146 config->config_table[1].target_table_entry.client_id = 0;
6147 config->config_table[1].target_table_entry.vlan_id = 0;
6148
6149 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6150 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6151 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6152}
6153
3101c2bc 6154static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6155{
6156 struct mac_configuration_cmd_e1h *config =
6157 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6158
3101c2bc 6159 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6160 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6161 return;
6162 }
6163
6164 /* CAM allocation for E1H
6165 * unicasts: by func number
6166 * multicast: 20+FUNC*20, 20 each
6167 */
6168 config->hdr.length_6b = 1;
6169 config->hdr.offset = BP_FUNC(bp);
6170 config->hdr.client_id = BP_CL_ID(bp);
6171 config->hdr.reserved1 = 0;
6172
6173 /* primary MAC */
6174 config->config_table[0].msb_mac_addr =
6175 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6176 config->config_table[0].middle_mac_addr =
6177 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6178 config->config_table[0].lsb_mac_addr =
6179 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6180 config->config_table[0].client_id = BP_L_ID(bp);
6181 config->config_table[0].vlan_id = 0;
6182 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6183 if (set)
6184 config->config_table[0].flags = BP_PORT(bp);
6185 else
6186 config->config_table[0].flags =
6187 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6188
6189 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6190 (set ? "setting" : "clearing"),
6191 config->config_table[0].msb_mac_addr,
6192 config->config_table[0].middle_mac_addr,
6193 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6194
6195 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6196 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6197 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6198}
6199
6200static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6201 int *state_p, int poll)
6202{
6203 /* can take a while if any port is running */
34f80b04 6204 int cnt = 500;
a2fbb9ea 6205
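	/* *state_p is updated from the slowpath completion path
	 * (see bnx2x_sp_event()); in poll mode the Rx completions are
	 * processed here by hand instead of from the interrupt path */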
6206 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6207 poll ? "polling" : "waiting", state, idx);
6208
6209 might_sleep();
34f80b04 6210 while (cnt--) {
6211 if (poll) {
6212 bnx2x_rx_int(bp->fp, 10);
6213 /* if index is different from 0
6214 * the reply for some commands will
3101c2bc 6215 * be on the non default queue
6216 */
6217 if (idx)
6218 bnx2x_rx_int(&bp->fp[idx], 10);
6219 }
a2fbb9ea 6220
3101c2bc 6221 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6222 if (*state_p == state)
6223 return 0;
6224
a2fbb9ea 6225 msleep(1);
6226 }
6227
a2fbb9ea 6228 /* timeout! */
6229 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6230 poll ? "polling" : "waiting", state, idx);
6231#ifdef BNX2X_STOP_ON_ERROR
6232 bnx2x_panic();
6233#endif
a2fbb9ea 6234
49d66772 6235 return -EBUSY;
6236}
6237
6238static int bnx2x_setup_leading(struct bnx2x *bp)
6239{
34f80b04 6240 int rc;
a2fbb9ea 6241
c14423fe 6242 /* reset IGU state */
34f80b04 6243 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6244
6245 /* SETUP ramrod */
6246 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6247
6248 /* Wait for completion */
6249 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6250
34f80b04 6251 return rc;
6252}
6253
6254static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6255{
a2fbb9ea 6256 /* reset IGU state */
34f80b04 6257 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6258
228241eb 6259 /* SETUP ramrod */
6260 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6261 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6262
6263 /* Wait for completion */
6264 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6265 &(bp->fp[index].state), 0);
6266}
6267
6268static int bnx2x_poll(struct napi_struct *napi, int budget);
6269static void bnx2x_set_rx_mode(struct net_device *dev);
6270
6271/* must be called with rtnl_lock */
6272static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6273{
228241eb 6274 u32 load_code;
34f80b04 6275 int i, rc;
6276#ifdef BNX2X_STOP_ON_ERROR
6277 if (unlikely(bp->panic))
6278 return -EPERM;
6279#endif
6280
6281 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6282
6283 /* Send LOAD_REQUEST command to MCP
6284 Returns the type of LOAD command:
6285 if it is the first port to be initialized
6286 common blocks should be initialized, otherwise - not
a2fbb9ea 6287 */
34f80b04 6288 if (!BP_NOMCP(bp)) {
6289 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6290 if (!load_code) {
da5a662a 6291 BNX2X_ERR("MCP response failure, aborting\n");
6292 return -EBUSY;
6293 }
34f80b04 6294 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6295 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6296
a2fbb9ea 6297 } else {
6298 int port = BP_PORT(bp);
6299
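		/* no MCP to arbitrate: load_count[0] counts functions
		 * chip-wide, load_count[1 + port] per port - the first
		 * function up does COMMON init, the first on its port
		 * PORT init, all others FUNCTION init only */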
6300 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6301 load_count[0], load_count[1], load_count[2]);
6302 load_count[0]++;
da5a662a 6303 load_count[1 + port]++;
6304 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6305 load_count[0], load_count[1], load_count[2]);
6306 if (load_count[0] == 1)
6307 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6308 else if (load_count[1 + port] == 1)
6309 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6310 else
6311 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6312 }
6313
6314 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6315 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6316 bp->port.pmf = 1;
6317 else
6318 bp->port.pmf = 0;
6319 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6320
6321 /* if we can't use MSI-X we only need one fp,
6322 * so try to enable MSI-X with the requested number of fp's
6323 * and fall back to INT#A with one fp
6324 */
6325 if (use_inta) {
6326 bp->num_queues = 1;
6327
6328 } else {
6329 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6330 /* user requested number */
6331 bp->num_queues = use_multi;
6332
6333 else if (use_multi)
6334 bp->num_queues = min_t(u32, num_online_cpus(),
6335 BP_MAX_QUEUES(bp));
6336 else
a2fbb9ea 6337 bp->num_queues = 1;
6338
6339 if (bnx2x_enable_msix(bp)) {
6340 /* failed to enable MSI-X */
6341 bp->num_queues = 1;
6342 if (use_multi)
6343 BNX2X_ERR("Multi requested but failed"
6344 " to enable MSI-X\n");
6345 }
6346 }
6347 DP(NETIF_MSG_IFUP,
6348 "set number of queues to %d\n", bp->num_queues);
c14423fe 6349
6350 if (bnx2x_alloc_mem(bp))
6351 return -ENOMEM;
6352
6353 for_each_queue(bp, i)
6354 bnx2x_fp(bp, i, disable_tpa) =
6355 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6356
6357 if (bp->flags & USING_MSIX_FLAG) {
6358 rc = bnx2x_req_msix_irqs(bp);
6359 if (rc) {
6360 pci_disable_msix(bp->pdev);
6361 goto load_error;
6362 }
6363 } else {
6364 bnx2x_ack_int(bp);
6365 rc = bnx2x_req_irq(bp);
6366 if (rc) {
6367 BNX2X_ERR("IRQ request failed, aborting\n");
6368 goto load_error;
6369 }
6370 }
6371
6372 for_each_queue(bp, i)
6373 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6374 bnx2x_poll, 128);
6375
a2fbb9ea 6376 /* Initialize HW */
6377 rc = bnx2x_init_hw(bp, load_code);
6378 if (rc) {
a2fbb9ea 6379 BNX2X_ERR("HW init failed, aborting\n");
d1014634 6380 goto load_int_disable;
6381 }
6382
a2fbb9ea 6383 /* Setup NIC internals and enable interrupts */
471de716 6384 bnx2x_nic_init(bp, load_code);
6385
6386 /* Send LOAD_DONE command to MCP */
34f80b04 6387 if (!BP_NOMCP(bp)) {
228241eb
ET
6388 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6389 if (!load_code) {
da5a662a 6390 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6391 rc = -EBUSY;
d1014634 6392 goto load_rings_free;
6393 }
6394 }
6395
6396 bnx2x_stats_init(bp);
6397
6398 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6399
6400 /* Enable Rx interrupt handling before sending the ramrod
6401 as it's completed on Rx FP queue */
65abd74d 6402 bnx2x_napi_enable(bp);
a2fbb9ea 6403
6404 /* Enable interrupt handling */
6405 atomic_set(&bp->intr_sem, 0);
6406
6407 rc = bnx2x_setup_leading(bp);
6408 if (rc) {
da5a662a 6409 BNX2X_ERR("Setup leading failed!\n");
d1014634 6410 goto load_netif_stop;
34f80b04 6411 }
a2fbb9ea 6412
6413 if (CHIP_IS_E1H(bp))
6414 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6415 BNX2X_ERR("!!! mf_cfg function disabled\n");
6416 bp->state = BNX2X_STATE_DISABLED;
6417 }
a2fbb9ea 6418
6419 if (bp->state == BNX2X_STATE_OPEN)
6420 for_each_nondefault_queue(bp, i) {
6421 rc = bnx2x_setup_multi(bp, i);
6422 if (rc)
d1014634 6423 goto load_netif_stop;
34f80b04 6424 }
a2fbb9ea 6425
34f80b04 6426 if (CHIP_IS_E1(bp))
3101c2bc 6427 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6428 else
3101c2bc 6429 bnx2x_set_mac_addr_e1h(bp, 1);
6430
6431 if (bp->port.pmf)
6432 bnx2x_initial_phy_init(bp);
6433
6434 /* Start fast path */
6435 switch (load_mode) {
6436 case LOAD_NORMAL:
6437 /* Tx queue should be only reenabled */
6438 netif_wake_queue(bp->dev);
6439 bnx2x_set_rx_mode(bp->dev);
6440 break;
6441
6442 case LOAD_OPEN:
a2fbb9ea 6443 netif_start_queue(bp->dev);
34f80b04 6444 bnx2x_set_rx_mode(bp->dev);
6445 if (bp->flags & USING_MSIX_FLAG)
6446 printk(KERN_INFO PFX "%s: using MSI-X\n",
6447 bp->dev->name);
34f80b04 6448 break;
a2fbb9ea 6449
34f80b04 6450 case LOAD_DIAG:
a2fbb9ea 6451 bnx2x_set_rx_mode(bp->dev);
6452 bp->state = BNX2X_STATE_DIAG;
6453 break;
6454
6455 default:
6456 break;
6457 }
6458
6459 if (!bp->port.pmf)
6460 bnx2x__link_status_update(bp);
6461
6462 /* start the timer */
6463 mod_timer(&bp->timer, jiffies + bp->current_interval);
6464
34f80b04 6465
6466 return 0;
6467
d1014634 6468load_netif_stop:
65abd74d 6469 bnx2x_napi_disable(bp);
d1014634 6470load_rings_free:
6471 /* Free SKBs, SGEs, TPA pool and driver internals */
6472 bnx2x_free_skbs(bp);
6473 for_each_queue(bp, i)
3196a88a 6474 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d1014634 6475load_int_disable:
f8ef6e44 6476 bnx2x_int_disable_sync(bp, 1);
6477 /* Release IRQs */
6478 bnx2x_free_irq(bp);
228241eb 6479load_error:
a2fbb9ea 6480 bnx2x_free_mem(bp);
9a035440 6481 bp->port.pmf = 0;
6482
6483 /* TBD we really need to reset the chip
6484 if we want to recover from this */
34f80b04 6485 return rc;
6486}
6487
6488static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6489{
6490 int rc;
6491
c14423fe 6492 /* halt the connection */
a2fbb9ea 6493 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
231fd58a 6494 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
a2fbb9ea 6495
34f80b04 6496 /* Wait for completion */
a2fbb9ea 6497 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6498 &(bp->fp[index].state), 1);
c14423fe 6499 if (rc) /* timeout */
6500 return rc;
6501
6502 /* delete cfc entry */
6503 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6504
6505 /* Wait for completion */
6506 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6507 &(bp->fp[index].state), 1);
6508 return rc;
6509}
6510
da5a662a 6511static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6512{
49d66772 6513 u16 dsb_sp_prod_idx;
c14423fe 6514 /* if the other port is handling traffic,
a2fbb9ea 6515 this can take a lot of time */
6516 int cnt = 500;
6517 int rc;
6518
6519 might_sleep();
6520
6521 /* Send HALT ramrod */
6522 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6523 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6524
6525 /* Wait for completion */
6526 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6527 &(bp->fp[0].state), 1);
6528 if (rc) /* timeout */
da5a662a 6529 return rc;
a2fbb9ea 6530
49d66772 6531 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6532
228241eb 6533 /* Send PORT_DELETE ramrod */
6534 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6535
49d66772 6536 /* Wait for completion to arrive on default status block
6537 we are going to reset the chip anyway
6538 so there is not much to do if this times out
6539 */
34f80b04 6540 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6541 if (!cnt) {
6542 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6543 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6544 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6545#ifdef BNX2X_STOP_ON_ERROR
6546 bnx2x_panic();
6547#else
6548 rc = -EBUSY;
6549#endif
6550 break;
6551 }
6552 cnt--;
da5a662a 6553 msleep(1);
6554 }
6555 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6556 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6557
6558 return rc;
6559}
6560
6561static void bnx2x_reset_func(struct bnx2x *bp)
6562{
6563 int port = BP_PORT(bp);
6564 int func = BP_FUNC(bp);
6565 int base, i;
6566
6567 /* Configure IGU */
6568 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6569 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6570
6571 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6572
6573 /* Clear ILT */
6574 base = FUNC_ILT_BASE(func);
6575 for (i = base; i < base + ILT_PER_FUNC; i++)
6576 bnx2x_ilt_wr(bp, i, 0);
6577}
6578
6579static void bnx2x_reset_port(struct bnx2x *bp)
6580{
6581 int port = BP_PORT(bp);
6582 u32 val;
6583
6584 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6585
6586 /* Do not rcv packets to BRB */
6587 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6588 /* Do not direct rcv packets that are not for MCP to the BRB */
6589 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6590 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6591
6592 /* Configure AEU */
6593 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6594
6595 msleep(100);
6596 /* Check for BRB port occupancy */
6597 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6598 if (val)
6599 DP(NETIF_MSG_IFDOWN,
33471629 6600 "BRB1 is not empty %d blocks are occupied\n", val);
6601
6602 /* TODO: Close Doorbell port? */
6603}
6604
6605static void bnx2x_reset_common(struct bnx2x *bp)
6606{
6607 /* reset_common */
6608 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6609 0xd3ffff7f);
6610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6611}
6612
6613static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6614{
6615 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6616 BP_FUNC(bp), reset_code);
6617
6618 switch (reset_code) {
6619 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6620 bnx2x_reset_port(bp);
6621 bnx2x_reset_func(bp);
6622 bnx2x_reset_common(bp);
6623 break;
6624
6625 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6626 bnx2x_reset_port(bp);
6627 bnx2x_reset_func(bp);
6628 break;
6629
6630 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6631 bnx2x_reset_func(bp);
6632 break;
49d66772 6633
6634 default:
6635 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6636 break;
6637 }
6638}
6639
33471629 6640/* must be called with rtnl_lock */
34f80b04 6641static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6642{
da5a662a 6643 int port = BP_PORT(bp);
a2fbb9ea 6644 u32 reset_code = 0;
da5a662a 6645 int i, cnt, rc;
6646
6647 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6648
6649 bp->rx_mode = BNX2X_RX_MODE_NONE;
6650 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6651
f8ef6e44 6652 bnx2x_netif_stop(bp, 1);
6653 if (!netif_running(bp->dev))
6654 bnx2x_napi_disable(bp);
6655 del_timer_sync(&bp->timer);
6656 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6657 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6658 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6659
da5a662a 6660 /* Wait until tx fast path tasks complete */
6661 for_each_queue(bp, i) {
6662 struct bnx2x_fastpath *fp = &bp->fp[i];
6663
6664 cnt = 1000;
6665 smp_rmb();
6666 while (BNX2X_HAS_TX_WORK(fp)) {
6667
65abd74d 6668 bnx2x_tx_int(fp, 1000);
6669 if (!cnt) {
6670 BNX2X_ERR("timeout waiting for queue[%d]\n",
6671 i);
6672#ifdef BNX2X_STOP_ON_ERROR
6673 bnx2x_panic();
6674 return -EBUSY;
6675#else
6676 break;
6677#endif
6678 }
6679 cnt--;
da5a662a 6680 msleep(1);
6681 smp_rmb();
6682 }
228241eb 6683 }
6684 /* Give HW time to discard old tx messages */
6685 msleep(1);
a2fbb9ea 6686
6687 /* Release IRQs */
6688 bnx2x_free_irq(bp);
6689
6690 if (CHIP_IS_E1(bp)) {
6691 struct mac_configuration_cmd *config =
6692 bnx2x_sp(bp, mcast_config);
6693
6694 bnx2x_set_mac_addr_e1(bp, 0);
6695
6696 for (i = 0; i < config->hdr.length_6b; i++)
6697 CAM_INVALIDATE(config->config_table[i]);
6698
6699 config->hdr.length_6b = i;
6700 if (CHIP_REV_IS_SLOW(bp))
6701 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6702 else
6703 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6704 config->hdr.client_id = BP_CL_ID(bp);
6705 config->hdr.reserved1 = 0;
6706
6707 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6708 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6709 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6710
6711 } else { /* E1H */
6712 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6713
6714 bnx2x_set_mac_addr_e1h(bp, 0);
6715
6716 for (i = 0; i < MC_HASH_SIZE; i++)
6717 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6718 }
6719
6720 if (unload_mode == UNLOAD_NORMAL)
6721 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6722
6723 else if (bp->flags & NO_WOL_FLAG) {
6724 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6725 if (CHIP_IS_E1H(bp))
6726 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6727
6728 } else if (bp->wol) {
6729 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6730 u8 *mac_addr = bp->dev->dev_addr;
6731 u32 val;
6732 /* The mac address is written to entries 1-4 to
6733 preserve entry 0 which is used by the PMF */
6734 u8 entry = (BP_E1HVN(bp) + 1)*8;
6735
6736 val = (mac_addr[0] << 8) | mac_addr[1];
6737 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6738
6739 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6740 (mac_addr[4] << 8) | mac_addr[5];
6741 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6742
6743 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6744
6745 } else
6746 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6747
6748 /* Close multi and leading connections
6749 Completions for ramrods are collected in a synchronous way */
6750 for_each_nondefault_queue(bp, i)
6751 if (bnx2x_stop_multi(bp, i))
228241eb 6752 goto unload_error;
a2fbb9ea 6753
6754 rc = bnx2x_stop_leading(bp);
6755 if (rc) {
34f80b04 6756 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6757#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6758 return -EBUSY;
6759#else
6760 goto unload_error;
34f80b04 6761#endif
6762 }
6763
6764unload_error:
34f80b04 6765 if (!BP_NOMCP(bp))
228241eb 6766 reset_code = bnx2x_fw_command(bp, reset_code);
6767 else {
6768 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6769 load_count[0], load_count[1], load_count[2]);
6770 load_count[0]--;
da5a662a 6771 load_count[1 + port]--;
6772 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6773 load_count[0], load_count[1], load_count[2]);
6774 if (load_count[0] == 0)
6775 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6776 else if (load_count[1 + port] == 0)
6777 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6778 else
6779 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6780 }
a2fbb9ea 6781
34f80b04
EG
6782 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6783 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6784 bnx2x__link_reset(bp);
a2fbb9ea
ET
6785
6786 /* Reset the chip */
228241eb 6787 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6788
6789 /* Report UNLOAD_DONE to MCP */
34f80b04 6790 if (!BP_NOMCP(bp))
a2fbb9ea 6791 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6792 bp->port.pmf = 0;
a2fbb9ea 6793
7a9b2557 6794 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6795 bnx2x_free_skbs(bp);
7a9b2557 6796 for_each_queue(bp, i)
3196a88a 6797 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6798 bnx2x_free_mem(bp);
6799
6800 bp->state = BNX2X_STATE_CLOSED;
228241eb 6801
a2fbb9ea
ET
6802 netif_carrier_off(bp->dev);
6803
6804 return 0;
6805}
6806
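/*
 * Full recovery path: tear the NIC down and bring it back up.  This
 * work item is scheduled when the driver decides the chip needs a
 * reset (e.g. from the tx timeout handler).
 */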
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

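/*
 * UNDI is the pre-boot (PXE) network driver.  If it was left loaded,
 * the chip is already initialized in a way Linux cannot take over
 * directly, so it is detected (via the doorbell CID offset it
 * programs) and the device is reset before use.
 */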
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7)
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
			       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			       MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		}
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

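	/*
	 * The MCP shared memory is expected inside a fixed GRC window
	 * (0xA0000-0xBFFFF).  A shmem_base outside that window means no
	 * management firmware is running (emulation/FPGA), so the driver
	 * flags NO_MCP and skips everything that talks to the bootcode.
	 */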
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

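/*
 * Build the ethtool SUPPORTED_* mask for this port from the NVRAM
 * switch configuration (1G SerDes vs 10G XGXS) and the external PHY
 * type, then trim it by the NVRAM speed capability mask.
 */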
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

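/*
 * Translate the NVRAM link_config into the requested line speed,
 * duplex and advertised mode mask.  Invalid combinations are reported
 * and, for the speed selector itself, fall back to autonegotiation.
 */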
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

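/*
 * Pull the per-port link parameters (SerDes/XGXS lane config, external
 * PHY config, speed capabilities) and the port MAC address out of the
 * MCP shared memory.
 */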
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
	     KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
		       "  link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

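/*
 * One-time per-function initialization: locks, deferred work, hardware
 * discovery, UNDI takeover, and the default ring/coalescing/timer
 * parameters.
 */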
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;
	bp->rx_offset = 0;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
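
	/*
	 * In E1H multi-function mode each virtual function has a maximum
	 * bandwidth share configured in mf_config (in units of 100 Mbps);
	 * cap the reported speed by it.
	 */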
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

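/*
 * Only the port management function (PMF) is allowed to talk to the
 * external PHY, hence the bp->port.pmf checks around the phy lock in
 * the ethtool handlers below.
 */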
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

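/*
 * The NVRAM sits behind the MCP register block and is shared, so the
 * SW_ARB request/clear bits implement a per-port hardware lock; it is
 * polled below with a timeout (scaled up 100x on emulation/FPGA, where
 * everything runs slower).
 */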
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

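/*
 * NVRAM only supports aligned dword accesses, so a single-byte write
 * is done as read-modify-write of the enclosing dword; BYTE_OFFSET
 * selects the byte lane within it.
 */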
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

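/*
 * TPA (the HW LRO aggregation) relies on the TCP checksum computed by
 * the chip, so it can only be enabled while Rx checksum offload is on;
 * toggling it requires a reload of the NIC.
 */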
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

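/*
 * Register self-test: for each entry in reg_tbl (offset, per-port
 * stride, RW bit mask) write 0x00000000 and then 0xffffffff, verify
 * that the writable bits read back as written, and restore the
 * original value.
 */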
8499static int bnx2x_test_registers(struct bnx2x *bp)
8500{
8501 int idx, i, rc = -ENODEV;
8502 u32 wr_val = 0;
9dabc424 8503 int port = BP_PORT(bp);
f3c87cdd
YG
8504 static const struct {
8505 u32 offset0;
8506 u32 offset1;
8507 u32 mask;
8508 } reg_tbl[] = {
8509/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8510 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8511 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8512 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8513 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8514 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8515 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8516 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8517 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8518 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8519/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8520 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8521 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8522 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8523 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8524 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8525 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8526 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8527 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8528 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8529/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8530 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8531 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8532 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8533 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8534 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8535 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8536 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8537 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8538 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8539/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8540 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8541 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8542 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8543 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8544 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8545 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8546 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8547
8548 { 0xffffffff, 0, 0x00000000 }
8549 };
8550
8551 if (!netif_running(bp->dev))
8552 return rc;
8553
8554 /* Repeat the test twice:
8555 First by writing 0x00000000, second by writing 0xffffffff */
8556 for (idx = 0; idx < 2; idx++) {
8557
8558 switch (idx) {
8559 case 0:
8560 wr_val = 0;
8561 break;
8562 case 1:
8563 wr_val = 0xffffffff;
8564 break;
8565 }
8566
8567 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8568 u32 offset, mask, save_val, val;
f3c87cdd
YG
8569
8570 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8571 mask = reg_tbl[i].mask;
8572
8573 save_val = REG_RD(bp, offset);
8574
8575 REG_WR(bp, offset, wr_val);
8576 val = REG_RD(bp, offset);
8577
8578 /* Restore the original register's value */
8579 REG_WR(bp, offset, save_val);
8580
8581 /* verify that value is as expected value */
8582 if ((val & mask) != (wr_val & mask))
8583 goto test_reg_exit;
8584 }
8585 }
8586
8587 rc = 0;
8588
8589test_reg_exit:
8590 return rc;
8591}
8592
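/* Memory self-test sketch: every word of the listed internal memories is
 * read back (presumably any latent parity error is latched as a side
 * effect of the reads), and each block's parity status register is then
 * compared against a per-chip mask of bits assumed benign on E1/E1H.
 */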
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

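/* Loopback probe: a single 1514-byte frame (our own MAC as destination,
 * a ramp pattern as payload) is posted on fastpath queue 0 as a one-BD
 * packet, the doorbell is rung, and after a short delay the TX and RX
 * consumer indices must each have advanced by exactly one; the CQE and
 * the echoed payload are then validated byte by byte.
 */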
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

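/* A bit is accumulated per failed mode (MAC first, then PHY) so ethtool
 * can report which loopback path broke; the data path is quiesced around
 * the test since the regular rings carry the probe frame.
 */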
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_netif_start(bp);

	return rc;
}

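/* CRC32 residue check: for a block that stores its little-endian CRC32
 * at the tail (as these NVRAM blocks appear to), running the CRC over
 * data-plus-checksum always yields the constant residual 0xdebb20e3, so
 * no per-block expected value is needed.
 */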
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

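/* Interrupt self-test sketch: a zero-length SET_MAC ramrod is posted on
 * the slowpath ring; its completion arrives via the normal interrupt
 * path, so set_mac_pending dropping back to zero within ~100ms is taken
 * as proof that interrupts are being delivered.
 */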
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	config->hdr.offset = 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

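/* ethtool self-test entry point: offline tests need exclusive use of the
 * device, so the NIC is reloaded in LOAD_DIAG mode around them and then
 * restored with LOAD_NORMAL; buf[] holds one result slot per entry of
 * bnx2x_tests_str_arr.
 */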
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

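/* Each entry maps an ethtool statistic name to a 32-bit word offset in
 * bp->eth_stats; size 8 marks a hi/lo pair of consecutive u32s, size 4 a
 * single u32. STATS_FLAGS_PORT counters are per physical port and are
 * therefore hidden in E1H multi-function mode, where the port is shared.
 */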
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}

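/* 8-byte counters live as two consecutive u32 words, hi first; assuming
 * the usual HILO_U64() definition from bnx2x.h, the copy below amounts
 * to:
 *	buf[j] = ((u64)hi << 32) | lo;
 */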
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */

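/* NAPI poll: TX completions are handled first, then RX within the
 * budget; a final BNX2X_HAS_WORK() re-check (ordered against the status
 * block DMA by the rmb()) decides whether the IGU interrupt may be
 * re-enabled via bnx2x_ack_sb().
 */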
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;
	u16 rx_cons_sb;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (BNX2X_HAS_TX_WORK(fp))
		bnx2x_tx_int(fp, budget);

	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	if (BNX2X_HAS_RX_WORK(fp))
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* BNX2X_HAS_WORK() reads the status block */
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}


/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

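/* Checksum fix-up sketch: when the stack computed the partial checksum
 * over a span starting 'fix' bytes before (fix > 0) or after (fix < 0)
 * the transport header, the contribution of those bytes is subtracted or
 * added back, the sum folded, and the result byte-swapped, since the
 * parse BD apparently wants the pseudo-checksum in big-endian form.
 */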
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

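/* The firmware can fetch only MAX_FETCH_BD BDs per generated packet, so
 * for LSO every window of (MAX_FETCH_BD - 3) consecutive frags must
 * carry at least one full MSS of payload; if any window falls short, or
 * a non-LSO skb simply has too many frags, the skb gets linearized.
 */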
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet that is too fragmented should
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
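	/*
	 * Resulting BD chain for a TSO packet, as built below:
	 *   [start BD: headers] -> [parse BD] -> [data BDs] ->
	 *   [frag BD] ... [frag BD marked END_BD]
	 * nbd counts every BD in the chain; bnx2x_tx_split() adds one more
	 * when the linear data is split off the header BD.
	 */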

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						i--; /* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

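			/*
			 * E1H approximates multicast filtering: the top
			 * byte of each MAC's crc32c selects one of 256
			 * hash bits spread over MC_HASH_SIZE 32-bit
			 * registers (regidx = bit >> 5, bit &= 0x1f).
			 */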
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

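/* One-time PCI bring-up for a function: enable the device, sanity-check
 * and map BAR0 (register window) and BAR2 (doorbells), choose a 64- or
 * 32-bit DMA mask, clear the PXP2 indirect-address registers a previous
 * driver may have left behind, and wire up netdev/ethtool ops plus the
 * feature flags.
 */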
34f80b04
EG
10026static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10027 struct net_device *dev)
a2fbb9ea
ET
10028{
10029 struct bnx2x *bp;
10030 int rc;
10031
10032 SET_NETDEV_DEV(dev, &pdev->dev);
10033 bp = netdev_priv(dev);
10034
34f80b04
EG
10035 bp->dev = dev;
10036 bp->pdev = pdev;
a2fbb9ea 10037 bp->flags = 0;
34f80b04 10038 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10039
10040 rc = pci_enable_device(pdev);
10041 if (rc) {
10042 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10043 goto err_out;
10044 }
10045
10046 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10047 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10048 " aborting\n");
10049 rc = -ENODEV;
10050 goto err_out_disable;
10051 }
10052
10053 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10054 printk(KERN_ERR PFX "Cannot find second PCI device"
10055 " base address, aborting\n");
10056 rc = -ENODEV;
10057 goto err_out_disable;
10058 }
10059
34f80b04
EG
10060 if (atomic_read(&pdev->enable_cnt) == 1) {
10061 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10062 if (rc) {
10063 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10064 " aborting\n");
10065 goto err_out_disable;
10066 }
a2fbb9ea 10067
34f80b04
EG
10068 pci_set_master(pdev);
10069 pci_save_state(pdev);
10070 }
a2fbb9ea
ET
10071
10072 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10073 if (bp->pm_cap == 0) {
10074 printk(KERN_ERR PFX "Cannot find power management"
10075 " capability, aborting\n");
10076 rc = -EIO;
10077 goto err_out_release;
10078 }
10079
10080 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10081 if (bp->pcie_cap == 0) {
10082 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10083 " aborting\n");
10084 rc = -EIO;
10085 goto err_out_release;
10086 }
10087
10088 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10089 bp->flags |= USING_DAC_FLAG;
10090 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10091 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10092 " failed, aborting\n");
10093 rc = -EIO;
10094 goto err_out_release;
10095 }
10096
10097 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10098 printk(KERN_ERR PFX "System does not support DMA,"
10099 " aborting\n");
10100 rc = -EIO;
10101 goto err_out_release;
10102 }
10103
34f80b04
EG
10104 dev->mem_start = pci_resource_start(pdev, 0);
10105 dev->base_addr = dev->mem_start;
10106 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10107
10108 dev->irq = pdev->irq;
10109
275f165f 10110 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10111 if (!bp->regview) {
10112 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10113 rc = -ENOMEM;
10114 goto err_out_release;
10115 }
10116
34f80b04
EG
10117 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10118 min_t(u64, BNX2X_DB_SIZE,
10119 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10120 if (!bp->doorbells) {
10121 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10122 rc = -ENOMEM;
10123 goto err_out_unmap;
10124 }
10125
10126 bnx2x_set_power_state(bp, PCI_D0);
10127
34f80b04
EG
10128 /* clean indirect addresses */
10129 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10130 PCICFG_VENDOR_ID_OFFSET);
10131 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10132 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10133 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10134 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10135
34f80b04 10136 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10137
c64213cd 10138 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10139 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10140 dev->features |= NETIF_F_SG;
10141 dev->features |= NETIF_F_HW_CSUM;
10142 if (bp->flags & USING_DAC_FLAG)
10143 dev->features |= NETIF_F_HIGHDMA;
10144#ifdef BCM_VLAN
10145 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10146#endif
10147 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10148 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10149
10150 return 0;
10151
10152err_out_unmap:
10153 if (bp->regview) {
10154 iounmap(bp->regview);
10155 bp->regview = NULL;
10156 }
a2fbb9ea
ET
10157 if (bp->doorbells) {
10158 iounmap(bp->doorbells);
10159 bp->doorbells = NULL;
10160 }
10161
10162err_out_release:
34f80b04
EG
10163 if (atomic_read(&pdev->enable_cnt) == 1)
10164 pci_release_regions(pdev);
a2fbb9ea
ET
10165
10166err_out_disable:
10167 pci_disable_device(pdev);
10168 pci_set_drvdata(pdev, NULL);
10169
10170err_out:
10171 return rc;
10172}
10173
25047950
ET
10174static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10175{
10176 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10177
10178 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10179 return val;
10180}
10181
10182/* return value of 1=2.5GHz 2=5GHz */
10183static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10184{
10185 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10186
10187 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10188 return val;
10189}
10190
a2fbb9ea
ET
10191static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10192 const struct pci_device_id *ent)
10193{
10194 static int version_printed;
10195 struct net_device *dev = NULL;
10196 struct bnx2x *bp;
25047950 10197 int rc;
a2fbb9ea
ET
10198
10199 if (version_printed++ == 0)
10200 printk(KERN_INFO "%s", version);
10201
10202 /* dev zeroed in init_etherdev */
10203 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10204 if (!dev) {
10205 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10206 return -ENOMEM;
34f80b04 10207 }
a2fbb9ea 10208
a2fbb9ea
ET
10209 bp = netdev_priv(dev);
10210 bp->msglevel = debug;
10211
34f80b04 10212 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10213 if (rc < 0) {
10214 free_netdev(dev);
10215 return rc;
10216 }
10217
a2fbb9ea
ET
10218 rc = register_netdev(dev);
10219 if (rc) {
c14423fe 10220 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10221 goto init_one_exit;
a2fbb9ea
ET
10222 }
10223
10224 pci_set_drvdata(pdev, dev);
10225
34f80b04
EG
10226 rc = bnx2x_init_bp(bp);
10227 if (rc) {
10228 unregister_netdev(dev);
10229 goto init_one_exit;
10230 }
10231
12b56ea8
EG
10232 netif_carrier_off(dev);
10233
34f80b04 10234 bp->common.name = board_info[ent->driver_data].name;
25047950 10235 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10236 " IRQ %d, ", dev->name, bp->common.name,
10237 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10238 bnx2x_get_pcie_width(bp),
10239 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10240 dev->base_addr, bp->pdev->irq);
e174961c 10241 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10242 return 0;
34f80b04
EG
10243
10244init_one_exit:
10245 if (bp->regview)
10246 iounmap(bp->regview);
10247
10248 if (bp->doorbells)
10249 iounmap(bp->doorbells);
10250
10251 free_netdev(dev);
10252
10253 if (atomic_read(&pdev->enable_cnt) == 1)
10254 pci_release_regions(pdev);
10255
10256 pci_disable_device(pdev);
10257 pci_set_drvdata(pdev, NULL);
10258
10259 return rc;
a2fbb9ea
ET
10260}
10261
10262static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10263{
10264 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10265 struct bnx2x *bp;
10266
10267 if (!dev) {
228241eb
ET
10268 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10269 return;
10270 }
228241eb 10271 bp = netdev_priv(dev);
a2fbb9ea 10272
a2fbb9ea
ET
10273 unregister_netdev(dev);
10274
10275 if (bp->regview)
10276 iounmap(bp->regview);
10277
10278 if (bp->doorbells)
10279 iounmap(bp->doorbells);
10280
10281 free_netdev(dev);
34f80b04
EG
10282
10283 if (atomic_read(&pdev->enable_cnt) == 1)
10284 pci_release_regions(pdev);
10285
a2fbb9ea
ET
10286 pci_disable_device(pdev);
10287 pci_set_drvdata(pdev, NULL);
10288}
10289
10290static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10291{
10292 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10293 struct bnx2x *bp;
10294
34f80b04
EG
10295 if (!dev) {
10296 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10297 return -ENODEV;
10298 }
10299 bp = netdev_priv(dev);
a2fbb9ea 10300
34f80b04 10301 rtnl_lock();
a2fbb9ea 10302
34f80b04 10303 pci_save_state(pdev);
228241eb 10304
34f80b04
EG
10305 if (!netif_running(dev)) {
10306 rtnl_unlock();
10307 return 0;
10308 }
a2fbb9ea
ET
10309
10310 netif_device_detach(dev);
a2fbb9ea 10311
da5a662a 10312 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10313
a2fbb9ea 10314 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10315
34f80b04
EG
10316 rtnl_unlock();
10317
a2fbb9ea
ET
10318 return 0;
10319}
10320
10321static int bnx2x_resume(struct pci_dev *pdev)
10322{
10323 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10324 struct bnx2x *bp;
a2fbb9ea
ET
10325 int rc;
10326
228241eb
ET
10327 if (!dev) {
10328 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10329 return -ENODEV;
10330 }
228241eb 10331 bp = netdev_priv(dev);
a2fbb9ea 10332
34f80b04
EG
10333 rtnl_lock();
10334
228241eb 10335 pci_restore_state(pdev);
34f80b04
EG
10336
10337 if (!netif_running(dev)) {
10338 rtnl_unlock();
10339 return 0;
10340 }
10341
	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

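/* Stripped-down unload used by the PCI error handlers: the link to the
 * device may already be dead, so the usual chip shutdown sequence is
 * skipped and only host-side state is torn down.  On E1 the cached
 * multicast CAM configuration is invalidated as well, presumably so that
 * nothing stale is replayed on the next load. */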
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

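/* Re-discover the shared-memory layout after a slot reset.  The shmem
 * base reported by the chip must fall inside the expected window
 * (0xA0000-0xC0000); anything else means the MCP management firmware is
 * not running, in which case the driver sets NO_MCP_FLAG and carries on
 * without it. */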
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
	    != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

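/* PCI error recovery callbacks.  The PCI core (AER/EEH) drives them in a
 * fixed sequence: error_detected() first, then, because NEED_RESET is
 * returned, slot_reset(), and finally resume().  The driver quiesces and
 * forgets the device in the first step, re-enables access in the second,
 * and reloads the NIC in the last. */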
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

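	/* Restore bus mastering and the previously saved config space
	 * before the device is touched again. */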
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

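/* Module entry points: registering the driver hands bnx2x_pci_tbl to the
 * PCI core, which then calls bnx2x_init_one() once for every matching
 * device it finds. */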
static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);