/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.17"
#define DRV_MODULE_RELDATE	"2008/08/13"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
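/* Note: both indirect accessors above leave the GRC address window
 * pointing back at PCICFG_VENDOR_ID_OFFSET when done, presumably so a
 * later stray config-space data access cannot land on an arbitrary GRC
 * register.
 */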

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
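/* Completion handshake used above: comp_addr points at the wb_comp word
 * in the slowpath area, and the DMAE engine writes DMAE_COMP_VAL there
 * when the copy is done, so the driver simply polls that word (up to
 * 200 iterations) instead of taking an interrupt.
 */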

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

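/* Scan the assert lists of the four on-chip "storm" RISC processors
 * (X/T/C/USTORM).  Each list entry is four consecutive u32 rows; the
 * scan stops at the first entry whose opcode is invalid.  Returns the
 * number of asserts found.
 */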
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
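/* Note on bnx2x_fw_dump() above: the MCP scratchpad appears to hold the
 * firmware log as a circular text buffer; "mark" (read from 0xf104) is
 * the current write pointer, so the dump prints from mark to the end of
 * the buffer and then wraps from 0xF108 back up to mark.  data[8] = 0
 * null-terminates each 32-byte chunk for printk.
 */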

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
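/* In INTA mode the HC is written twice above: first with both INT_LINE
 * and MSI/MSIX enable bits set, then with MSI/MSIX cleared.  The
 * intermediate write is presumably an ordering requirement of the host
 * coalescing block when switching to line interrupts.
 */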

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
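/* Return value above is a bitmask: bit 0 - the CSTORM status-block
 * index advanced, bit 1 - the USTORM one did; 0 means no new events
 * since the last poll.
 */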

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
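/* Illustration of the math above (hypothetical numbers): with
 * prod = 110, cons = 100, NUM_TX_RINGS = 16 and tx_ring_size = 4096,
 * used = 10 + 16 = 26 and 4070 BDs are reported free.  Counting every
 * "next-page" BD as permanently used keeps the estimate safely
 * conservative, and SUB_S16() keeps it correct across prod/cons wrap.
 */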

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
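/* Slowpath commands ("ramrods") are posted to the firmware via the SPQ
 * and complete back through the RX completion ring as slowpath CQEs;
 * the handler above matches a completion against the expected
 * (command | state) pair, advances the state machine and credits
 * spq_left for the consumed SPQ entry.
 */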

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
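/* Each TPA "queue" above is an aggregation bin: between TPA_START and
 * TPA_END the skb collecting the aggregated frame lives in
 * fp->tpa_pool[queue], while a spare skb from the pool takes its place
 * on the BD ring so the hardware always sees a valid buffer.
 */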

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
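/* The status word read from the IGU above is a bitmap: bit 0 signals
 * the default (slowpath) status block, while each fastpath status block
 * owns the bit at (sb_id + 1) - hence the "0x2 << sb_id" mask.
 */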

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
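/* Hardware lock protocol as used above: writing the resource bit to
 * (hw_lock_control_reg + 4) requests the lock, and reading back the
 * base register shows current ownership; bnx2x_release_hw_lock() below
 * drops it by writing the same bit to the base register.  Each PCI
 * function gets its own DRIVER_CONTROL register pair.
 */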

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
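
/*
 * Example (taken from the fan-failure handling further down): driving the
 * PHY reset line low on the current port:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */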

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
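
/*
 * Worked example (values are illustrative): with four visible VNs
 * configured for min rates of 10, 0, 25 and 15 (in units of 100 Mbps),
 * the loop yields wsum = 1000 + DEF_MIN_RATE + 2500 + 1500; the single
 * zero entry is counted as DEF_MIN_RATE, and since not all entries were
 * zero, fairness stays enabled.
 */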

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
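
/*
 * Worked example for a 10G port (assuming the 100 usec period implied by
 * the comment above): port_rate = 10000 Mbps gives r_param = 1250
 * bytes/usec, so rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes, and
 * t_fair = T_FAIR_COEF / 10000 = 1000 usec, matching the comment above.
 */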

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
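
/*
 * Worked example (illustrative): a func_mf_config MIN_BW field of 25
 * decodes to vn_min_rate = 25 * 100 = 2500 Mbps; with a 100 usec period,
 * a MAX_BW of 50 (5000 Mbps) yields a rate-shaping quota of
 * (5000 * 100) / 8 = 62500 bytes per period.
 */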

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
						     &m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
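
/*
 * Example caller (see bnx2x_storm_stats_post() further down): posting a
 * statistics query ramrod on CID 0, with the ramrod data split into the
 * high/low dwords and 'common' set to 0:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */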

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
2402
4a37fb66
YG
2403/* release split MCP access lock register */
2404static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2405{
2406 u32 val = 0;
2407
2408 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
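
/*
 * The returned bitmask encodes which indices moved: bit 0 - attention
 * bits, bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 * bnx2x_sp_task() below keys off bit 0 (HW attentions) and bit 1 (CStorm
 * events) of this value.
 */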

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}
2534
877e9aa4 2535static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2536{
34f80b04 2537 int port = BP_PORT(bp);
877e9aa4
ET
2538 int reg_offset;
2539 u32 val;
2540
34f80b04
EG
2541 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2543
34f80b04 2544 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2545
2546 val = REG_RD(bp, reg_offset);
2547 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548 REG_WR(bp, reg_offset, val);
2549
2550 BNX2X_ERR("SPIO5 hw attention\n");
2551
34f80b04 2552 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2553 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2555 /* Fan failure attention */
2556
17de50b7 2557 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2558 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2559 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2560 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2561 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2562 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2563 /* mark the failure */
c18487ee 2564 bp->link_params.ext_phy_config &=
877e9aa4 2565 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2566 bp->link_params.ext_phy_config |=
877e9aa4
ET
2567 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2568 SHMEM_WR(bp,
2569 dev_info.port_hw_config[port].
2570 external_phy_config,
c18487ee 2571 bp->link_params.ext_phy_config);
877e9aa4
ET
2572 /* log the failure */
2573 printk(KERN_ERR PFX "Fan Failure on Network"
2574 " Controller %s has caused the driver to"
2575 " shutdown the card to prevent permanent"
2576 " damage. Please contact Dell Support for"
2577 " assistance\n", bp->dev->name);
2578 break;
2579
2580 default:
2581 break;
2582 }
2583 }
34f80b04
EG
2584
2585 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2586
2587 val = REG_RD(bp, reg_offset);
2588 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2589 REG_WR(bp, reg_offset, val);
2590
2591 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2592 (attn & HW_INTERRUT_ASSERT_SET_0));
2593 bnx2x_panic();
2594 }
877e9aa4
ET
2595}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
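
/*
 * Example of the derivation above (illustrative values): with
 * attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1, bit 2 is newly
 * raised (asserted = 0x4) and nothing is deasserted; once the chip acks
 * bit 2 and later clears it, the same expressions flag the deassertion.
 */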

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
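
/*
 * Carry example: with s = 0x00000001:0xffffffff and a = 0x0:0x2, the low
 * dword wraps to 0x1, (s_lo < a_lo) detects the wrap, and the high dword
 * becomes 0x2 - i.e. the pair behaves as a single 64-bit accumulator.
 */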

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)
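
/*
 * The UPDATE_EXTEND_* macros widen the chip's 32-bit counters into 64-bit
 * software accumulators: the unsigned subtraction new - old yields the
 * correct delta even when the hardware counter wraps, and ADD_EXTEND_64
 * folds that delta (with carry) into the hi:lo pair.
 */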

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
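
/*
 * On a 64-bit kernel this returns the full HILO_U64(hi, lo) value; on a
 * 32-bit kernel only the low dword is reported, so a counter that has
 * passed 2^32 is truncated there.
 */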

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}
3477
3478static void bnx2x_emac_stats_update(struct bnx2x *bp)
3479{
3480 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3481 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482
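	/* UPDATE_EXTEND_STAT folds each 32-bit EMAC reading with carry
	 * (ADD_EXTEND_64) into the matching 64-bit host counter */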
	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

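	/* publish the snapshot: bumping host_port_stats_start together
	 * with host_port_stats_end lets a reader detect a block that is
	 * only half updated */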
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

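	/* each storm increments its per-client stats_counter when it
	 * posts a fresh snapshot; the data is consumed only when both
	 * TSTORM and XSTORM have caught up with the last query the
	 * driver posted, otherwise this round is skipped */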
	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   "  tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   "  xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats were not updated for 3 consecutive polls\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

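/*
 * Statistics state machine: bnx2x_stats_handle() looks up the cell for
 * the current state and the incoming event, runs its action and adopts
 * its next_state.  Note that an UPDATE event while DISABLED is
 * deliberately a no-op.
 */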
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

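/*
 * A host status block consists of a USTORM half carrying the Rx
 * indices and a CSTORM half carrying the Tx index.  Each half is
 * registered with its storm by writing the DMA address of the section
 * and the owning function, and every host-coalescing index starts out
 * disabled.
 */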
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

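/*
 * Interrupt coalescing: rx_ticks/tx_ticks are configured in
 * microseconds while the storm HC timeout fields count 12-usec periods
 * (hence the division by 12 below); a tick value of 0 disables the
 * corresponding status block index instead.
 */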
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_use_size = bp->dev->mtu;
	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_use_size, bp->rx_buf_size,
		   bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
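		/* the tail of every ring page is reserved for a pointer
		 * to the next page, chaining the pages into one circular
		 * ring (hence the "- 2" and "- 1" offsets below) */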
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size = 64;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_use_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

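/*
 * Populate the TSTORM RSS indirection table: entry i is served by queue
 * (i % num_queues), spreading the hash buckets round-robin over all
 * enabled queues.
 */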
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_use_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

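/*
 * The MCP load response tells this function how much shared state it
 * must bring up: COMMON initializes chip-wide, port and function state
 * (note the intentional switch fall-through below), PORT initializes
 * port and function state, and FUNCTION only its own.
 */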
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

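	/* a gzip member starts with a 10-byte fixed header (RFC 1952);
	 * if the FNAME flag is set in the FLG byte, a NUL-terminated
	 * original file name follows and must be skipped to reach the
	 * raw deflate stream */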
	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4913
4914/* some of the internal memories
4915 * are not directly readable from the driver
4916 * to test them we send debug packets
4917 */
4918static int bnx2x_int_mem_test(struct bnx2x *bp)
4919{
4920 int factor;
4921 int count, i;
4922 u32 val = 0;
4923
ad8d3948 4924 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4925 factor = 120;
ad8d3948
EG
4926 else if (CHIP_REV_IS_EMUL(bp))
4927 factor = 200;
4928 else
a2fbb9ea 4929 factor = 1;
a2fbb9ea
ET
4930
4931 DP(NETIF_MSG_HW, "start part1\n");
4932
4933 /* Disable inputs of parser neighbor blocks */
4934 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4935 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4936 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4937 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4938
4939 /* Write 0 to parser credits for CFC search request */
4940 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4941
4942 /* send Ethernet packet */
4943 bnx2x_lb_pckt(bp);
4944
4945 /* TODO do i reset NIG statistic? */
4946 /* Wait until NIG register shows 1 packet of size 0x10 */
4947 count = 1000 * factor;
4948 while (count) {
34f80b04 4949
a2fbb9ea
ET
4950 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4951 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4952 if (val == 0x10)
4953 break;
4954
4955 msleep(10);
4956 count--;
4957 }
4958 if (val != 0x10) {
4959 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4960 return -1;
4961 }
4962
4963 /* Wait until PRS register shows 1 packet */
4964 count = 1000 * factor;
4965 while (count) {
4966 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4967 if (val == 1)
4968 break;
4969
4970 msleep(10);
4971 count--;
4972 }
4973 if (val != 0x1) {
4974 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4975 return -2;
4976 }
4977
4978 /* Reset and init BRB, PRS */
34f80b04 4979 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4980 msleep(50);
34f80b04 4981 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4982 msleep(50);
4983 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4984 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4985
4986 DP(NETIF_MSG_HW, "part2\n");
4987
4988 /* Disable inputs of parser neighbor blocks */
4989 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4990 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4991 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4992 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4993
4994 /* Write 0 to parser credits for CFC search request */
4995 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4996
4997 /* send 10 Ethernet packets */
4998 for (i = 0; i < 10; i++)
4999 bnx2x_lb_pckt(bp);
5000
5001 /* Wait until NIG register shows 10 + 1
5002 packets of size 11*0x10 = 0xb0 */
5003 count = 1000 * factor;
5004 while (count) {
34f80b04 5005
a2fbb9ea
ET
5006 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5007 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5008 if (val == 0xb0)
5009 break;
5010
5011 msleep(10);
5012 count--;
5013 }
5014 if (val != 0xb0) {
5015 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5016 return -3;
5017 }
5018
5019 /* Wait until PRS register shows 2 packets */
5020 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5021 if (val != 2)
5022 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5023
5024 /* Write 1 to parser credits for CFC search request */
5025 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5026
5027 /* Wait until PRS register shows 3 packets */
5028 msleep(10 * factor);
5029 /* Wait until NIG register shows 1 packet of size 0x10 */
5030 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5031 if (val != 3)
5032 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5033
5034 /* clear NIG EOP FIFO */
5035 for (i = 0; i < 11; i++)
5036 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5037 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5038 if (val != 1) {
5039 BNX2X_ERR("clear of NIG failed\n");
5040 return -4;
5041 }
5042
5043 /* Reset and init BRB, PRS, NIG */
5044 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5045 msleep(50);
5046 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5047 msleep(50);
5048 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5049 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5050#ifndef BCM_ISCSI
5051 /* set NIC mode */
5052 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5053#endif
5054
5055 /* Enable inputs of parser neighbor blocks */
5056 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5057 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5058 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5059 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5060
5061 DP(NETIF_MSG_HW, "done\n");
5062
5063 return 0; /* OK */
5064}
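/* Summary (annotation, not in the original source): the self test above
   pushed loopback packets through BRB/PRS with the parser's neighbor
   blocks disabled, checked the NIG and PRS packet counters at each step,
   and left BRB, PRS and the NIG EOP FIFO freshly reset for normal use */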
5065
5066static void enable_blocks_attention(struct bnx2x *bp)
5067{
5068 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5069 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5070 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5071 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5072 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5073 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5074 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5075 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5076 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5077/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5078/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5079 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5080 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5081 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5082/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5083/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5084 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5085 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5086 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5087 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5088/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5089/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5090 if (CHIP_REV_IS_FPGA(bp))
5091 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5092 else
5093 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5094 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5095 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5096 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5097/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5098/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5099 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5100 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5101/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5102 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
5103}
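/* Note (annotation): writing 0 to a block's INT_MASK register presumably
   unmasks (enables) all of that block's attention bits; PBF is the
   exception above, keeping bits 3 and 4 masked (0x18) */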
5104
5105
5106static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5107{
a2fbb9ea 5108 u32 val, i;
a2fbb9ea 5109
34f80b04 5110 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5111
5112 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5113 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5114
5115 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5116 if (CHIP_IS_E1H(bp))
5117 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5118
5119 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5120 msleep(30);
5121 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5122
5123 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5124 if (CHIP_IS_E1(bp)) {
5125 /* enable HW interrupt from PXP on USDM overflow
5126 bit 16 on INT_MASK_0 */
5127 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5128 }
a2fbb9ea 5129
5130 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5131 bnx2x_init_pxp(bp);
5132
5133#ifdef __BIG_ENDIAN
5134 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5135 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5138 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5139 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5140
5141/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5142 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5143 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5144 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5145 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5146#endif
5147
34f80b04 5148 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5149#ifdef BCM_ISCSI
5150 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5151 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5152 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5153#endif
5154
5155 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5156 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5157
5158 /* let the HW do its magic ... */
5159 msleep(100);
5160 /* finish PXP init */
5161 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5162 if (val != 1) {
5163 BNX2X_ERR("PXP2 CFG failed\n");
5164 return -EBUSY;
5165 }
5166 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5167 if (val != 1) {
5168 BNX2X_ERR("PXP2 RD_INIT failed\n");
5169 return -EBUSY;
5170 }
a2fbb9ea 5171
5172 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5173 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5174
34f80b04 5175 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5176
5177 /* clean the DMAE memory */
5178 bp->dmae_ready = 1;
5179 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5180
5181 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5182 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5183 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5184 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5185
5186 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5187 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5188 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5189 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5190
5191 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5192 /* soft reset pulse */
5193 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5194 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5195
5196#ifdef BCM_ISCSI
34f80b04 5197 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5198#endif
a2fbb9ea 5199
5200 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5201 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5202 if (!CHIP_REV_IS_SLOW(bp)) {
5203 /* enable hw interrupt from doorbell Q */
5204 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5205 }
a2fbb9ea 5206
5207 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5208 if (CHIP_REV_IS_SLOW(bp)) {
5209 /* fix for emulation and FPGA for no pause */
5210 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5211 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5212 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5213 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5214 }
a2fbb9ea 5215
34f80b04 5216 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5217 /* set NIC mode */
5218 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5219 if (CHIP_IS_E1H(bp))
5220 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5221
5222 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5223 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5224 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5225 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5226
5227 if (CHIP_IS_E1H(bp)) {
5228 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5229 STORM_INTMEM_SIZE_E1H/2);
5230 bnx2x_init_fill(bp,
5231 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5232 0, STORM_INTMEM_SIZE_E1H/2);
5233 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5234 STORM_INTMEM_SIZE_E1H/2);
5235 bnx2x_init_fill(bp,
5236 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5237 0, STORM_INTMEM_SIZE_E1H/2);
5238 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5239 STORM_INTMEM_SIZE_E1H/2);
5240 bnx2x_init_fill(bp,
5241 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5242 0, STORM_INTMEM_SIZE_E1H/2);
5243 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5244 STORM_INTMEM_SIZE_E1H/2);
5245 bnx2x_init_fill(bp,
5246 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5247 0, STORM_INTMEM_SIZE_E1H/2);
5248 } else { /* E1 */
5249 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5250 STORM_INTMEM_SIZE_E1);
5251 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5252 STORM_INTMEM_SIZE_E1);
5253 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5254 STORM_INTMEM_SIZE_E1);
5255 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5256 STORM_INTMEM_SIZE_E1);
34f80b04 5257 }
a2fbb9ea 5258
5259 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5260 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5261 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5262 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5263
5264 /* sync semi rtc */
5265 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5266 0x80000000);
5267 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5268 0x80000000);
a2fbb9ea 5269
5270 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5271 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5272 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5273
5274 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5275 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5276 REG_WR(bp, i, 0xc0cac01a);
5277 /* TODO: replace with something meaningful */
5278 }
5279 if (CHIP_IS_E1H(bp))
5280 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5281 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5282
5283 if (sizeof(union cdu_context) != 1024)
5284 /* we currently assume that a context is 1024 bytes */
5285 printk(KERN_ALERT PFX "please adjust the size of"
5286 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5287
5288 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5289 val = (4 << 24) + (0 << 12) + 1024;
5290 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5291 if (CHIP_IS_E1(bp)) {
5292 /* !!! fix PXP client credit until Excel update */
5293 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5294 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5295 }
a2fbb9ea 5296
5297 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5298 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5299
5300 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5301 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5302
5303 /* PXPCS COMMON comes here */
5304 /* Reset PCIE errors for debug */
5305 REG_WR(bp, 0x2814, 0xffffffff);
5306 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5307
5308 /* EMAC0 COMMON comes here */
5309 /* EMAC1 COMMON comes here */
5310 /* DBU COMMON comes here */
5311 /* DBG COMMON comes here */
5312
5313 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5314 if (CHIP_IS_E1H(bp)) {
5315 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5316 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5317 }
5318
5319 if (CHIP_REV_IS_SLOW(bp))
5320 msleep(200);
5321
5322 /* finish CFC init */
5323 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5324 if (val != 1) {
5325 BNX2X_ERR("CFC LL_INIT failed\n");
5326 return -EBUSY;
5327 }
5328 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5329 if (val != 1) {
5330 BNX2X_ERR("CFC AC_INIT failed\n");
5331 return -EBUSY;
5332 }
5333 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5334 if (val != 1) {
5335 BNX2X_ERR("CFC CAM_INIT failed\n");
5336 return -EBUSY;
5337 }
5338 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5339
5340 /* read NIG statistic
5341 to see if this is our first up since powerup */
5342 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5343 val = *bnx2x_sp(bp, wb_data[0]);
5344
5345 /* do internal memory self test */
5346 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5347 BNX2X_ERR("internal mem self test failed\n");
5348 return -EBUSY;
5349 }
5350
5351 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5352 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5354 /* Fan failure is indicated by SPIO 5 */
5355 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5356 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5357
5358 /* set to active low mode */
5359 val = REG_RD(bp, MISC_REG_SPIO_INT);
5360 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5361 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5362 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5363
5364 /* enable interrupt to signal the IGU */
5365 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5366 val |= (1 << MISC_REGISTERS_SPIO_5);
5367 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5368 break;
f1410647 5369
5370 default:
5371 break;
5372 }
f1410647 5373
5374 /* clear PXP2 attentions */
5375 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5376
34f80b04 5377 enable_blocks_attention(bp);
a2fbb9ea 5378
5379 if (!BP_NOMCP(bp)) {
5380 bnx2x_acquire_phy_lock(bp);
5381 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5382 bnx2x_release_phy_lock(bp);
5383 } else
5384 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5385
5386 return 0;
5387}
a2fbb9ea 5388
5389static int bnx2x_init_port(struct bnx2x *bp)
5390{
5391 int port = BP_PORT(bp);
5392 u32 val;
a2fbb9ea 5393
5394 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5395
5396 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5397
5398 /* Port PXP comes here */
5399 /* Port PXP2 comes here */
5400#ifdef BCM_ISCSI
5401 /* Port0 1
5402 * Port1 385 */
5403 i++;
5404 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5405 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5406 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5407 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5408
5409 /* Port0 2
5410 * Port1 386 */
5411 i++;
5412 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5413 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5414 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5415 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5416
5417 /* Port0 3
5418 * Port1 387 */
5419 i++;
5420 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5421 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5422 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5423 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5424#endif
34f80b04 5425 /* Port CMs come here */
5426
5427 /* Port QM comes here */
5428#ifdef BCM_ISCSI
5429 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5430 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5431
5432 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5433 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5434#endif
5435 /* Port DQ comes here */
5436 /* Port BRB1 comes here */
ad8d3948 5437 /* Port PRS comes here */
5438 /* Port TSDM comes here */
5439 /* Port CSDM comes here */
5440 /* Port USDM comes here */
5441 /* Port XSDM comes here */
5442 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5443 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5444 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5445 port ? USEM_PORT1_END : USEM_PORT0_END);
5446 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5447 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5448 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5449 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5450 /* Port UPB comes here */
5451 /* Port XPB comes here */
5452
5453 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5454 port ? PBF_PORT1_END : PBF_PORT0_END);
5455
5456 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5457 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5458
5459 /* update threshold */
34f80b04 5460 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5461 /* update init credit */
34f80b04 5462 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5463
5464 /* probe changes */
34f80b04 5465 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5466 msleep(5);
34f80b04 5467 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5468
5469#ifdef BCM_ISCSI
5470 /* tell the searcher where the T2 table is */
5471 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5472
5473 wb_write[0] = U64_LO(bp->t2_mapping);
5474 wb_write[1] = U64_HI(bp->t2_mapping);
5475 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5476 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5477 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5478 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5479
5480 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5481 /* Port SRCH comes here */
5482#endif
5483 /* Port CDU comes here */
5484 /* Port CFC comes here */
5485
5486 if (CHIP_IS_E1(bp)) {
5487 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5488 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5489 }
5490 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5491 port ? HC_PORT1_END : HC_PORT0_END);
5492
5493 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5494 MISC_AEU_PORT0_START,
5495 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5496 /* init aeu_mask_attn_func_0/1:
5497 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5498 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5499 * bits 4-7 are used for "per vn group attention" */
5500 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5501 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5502
5503 /* Port PXPCS comes here */
5504 /* Port EMAC0 comes here */
5505 /* Port EMAC1 comes here */
5506 /* Port DBU comes here */
5507 /* Port DBG comes here */
5508 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5509 port ? NIG_PORT1_END : NIG_PORT0_END);
5510
5511 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5512
5513 if (CHIP_IS_E1H(bp)) {
5514 u32 wsum;
5515 struct cmng_struct_per_port m_cmng_port;
5516 int vn;
5517
5518 /* 0x2 disable e1hov, 0x1 enable */
5519 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5520 (IS_E1HMF(bp) ? 0x1 : 0x2));
5521
5522 /* Init RATE SHAPING and FAIRNESS contexts.
5523 Initialize as if there is a 10G link. */
5524 wsum = bnx2x_calc_vn_wsum(bp);
5525 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5526 if (IS_E1HMF(bp))
5527 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5528 bnx2x_init_vn_minmax(bp, 2*vn + port,
5529 wsum, 10000, &m_cmng_port);
5530 }
5531
5532 /* Port MCP comes here */
5533 /* Port DMAE comes here */
5534
34f80b04 5535 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5536 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5538 /* add SPIO 5 to group 0 */
5539 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5540 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5541 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5542 break;
5543
5544 default:
5545 break;
5546 }
5547
c18487ee 5548 bnx2x__link_reset(bp);
a2fbb9ea 5549
5550 return 0;
5551}
5552
5553#define ILT_PER_FUNC (768/2)
5554#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5555/* the phys address is shifted right 12 bits and a 1=valid bit is
5556 added as the 53rd bit
5557 then since this is a wide register(TM)
5558 we split it into two 32 bit writes
5559 */
5560#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5561#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5562#define PXP_ONE_ILT(x) (((x) << 10) | x)
5563#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
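/* Worked example (illustrative annotation, not in the original source):
   for a DMA address of 0x0000001234567000, ONCHIP_ADDR1() yields
   0x01234567 (addr >> 12) and ONCHIP_ADDR2() yields 0x00100000 - only
   the valid bit is set (bit 20 of the high word, i.e. the 53rd bit
   overall), since addr >> 44 is zero here */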
5564
5565#define CNIC_ILT_LINES 0
5566
5567static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5568{
5569 int reg;
5570
5571 if (CHIP_IS_E1H(bp))
5572 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5573 else /* E1 */
5574 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5575
5576 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5577}
5578
5579static int bnx2x_init_func(struct bnx2x *bp)
5580{
5581 int port = BP_PORT(bp);
5582 int func = BP_FUNC(bp);
5583 int i;
5584
5585 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5586
5587 i = FUNC_ILT_BASE(func);
5588
5589 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5590 if (CHIP_IS_E1H(bp)) {
5591 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5592 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5593 } else /* E1 */
5594 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5595 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5596
5597
5598 if (CHIP_IS_E1H(bp)) {
5599 for (i = 0; i < 9; i++)
5600 bnx2x_init_block(bp,
5601 cm_start[func][i], cm_end[func][i]);
5602
5603 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5604 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5605 }
5606
5607 /* HC init per function */
5608 if (CHIP_IS_E1H(bp)) {
5609 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5610
5611 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5612 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5613 }
5614 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5615
5616 if (CHIP_IS_E1H(bp))
5617 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5618
c14423fe 5619 /* Reset PCIE errors for debug */
5620 REG_WR(bp, 0x2114, 0xffffffff);
5621 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5622
5623 return 0;
5624}
5625
5626static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5627{
5628 int i, rc = 0;
a2fbb9ea 5629
5630 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5631 BP_FUNC(bp), load_code);
a2fbb9ea 5632
5633 bp->dmae_ready = 0;
5634 mutex_init(&bp->dmae_mutex);
5635 bnx2x_gunzip_init(bp);
a2fbb9ea 5636
5637 switch (load_code) {
5638 case FW_MSG_CODE_DRV_LOAD_COMMON:
5639 rc = bnx2x_init_common(bp);
5640 if (rc)
5641 goto init_hw_err;
5642 /* no break */
5643
5644 case FW_MSG_CODE_DRV_LOAD_PORT:
5645 bp->dmae_ready = 1;
5646 rc = bnx2x_init_port(bp);
5647 if (rc)
5648 goto init_hw_err;
5649 /* no break */
5650
5651 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5652 bp->dmae_ready = 1;
5653 rc = bnx2x_init_func(bp);
5654 if (rc)
5655 goto init_hw_err;
5656 break;
5657
5658 default:
5659 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5660 break;
5661 }
5662
5663 if (!BP_NOMCP(bp)) {
5664 int func = BP_FUNC(bp);
5665
5666 bp->fw_drv_pulse_wr_seq =
34f80b04 5667 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5668 DRV_PULSE_SEQ_MASK);
5669 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5670 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5671 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5672 } else
5673 bp->func_stx = 0;
a2fbb9ea 5674
5675 /* this needs to be done before gunzip end */
5676 bnx2x_zero_def_sb(bp);
5677 for_each_queue(bp, i)
5678 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5679
5680init_hw_err:
5681 bnx2x_gunzip_end(bp);
5682
5683 return rc;
5684}
5685
c14423fe 5686/* send the MCP a request, block until there is a reply */
5687static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5688{
34f80b04 5689 int func = BP_FUNC(bp);
5690 u32 seq = ++bp->fw_seq;
5691 u32 rc = 0;
5692 u32 cnt = 1;
5693 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5694
34f80b04 5695 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5696 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5697
5698 do {
5699 /* let the FW do its magic ... */
5700 msleep(delay);
a2fbb9ea 5701
19680c48 5702 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5703
5704 /* Give the FW up to 2 seconds (200*10ms) */
5705 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5706
5707 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5708 cnt*delay, rc, seq);
5709
5710 /* is this a reply to our command? */
5711 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5712 rc &= FW_MSG_CODE_MASK;
f1410647 5713
5714 } else {
5715 /* FW BUG! */
5716 BNX2X_ERR("FW failed to respond!\n");
5717 bnx2x_fw_dump(bp);
5718 rc = 0;
5719 }
f1410647 5720
5721 return rc;
5722}
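/* Usage sketch (annotation): bnx2x_nic_load() below posts
   DRV_MSG_CODE_LOAD_REQ through this helper and the MCP answers with one
   of the FW_MSG_CODE_DRV_LOAD_* codes, which selects how much of the
   chip (common/port/function) this driver instance initializes */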
5723
5724static void bnx2x_free_mem(struct bnx2x *bp)
5725{
5726
5727#define BNX2X_PCI_FREE(x, y, size) \
5728 do { \
5729 if (x) { \
5730 pci_free_consistent(bp->pdev, size, x, y); \
5731 x = NULL; \
5732 y = 0; \
5733 } \
5734 } while (0)
5735
5736#define BNX2X_FREE(x) \
5737 do { \
5738 if (x) { \
5739 vfree(x); \
5740 x = NULL; \
5741 } \
5742 } while (0)
5743
5744 int i;
5745
5746 /* fastpath */
5747 for_each_queue(bp, i) {
5748
5749 /* Status blocks */
5750 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5751 bnx2x_fp(bp, i, status_blk_mapping),
5752 sizeof(struct host_status_block) +
5753 sizeof(struct eth_tx_db_data));
5754
5755 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5756 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5757 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5758 bnx2x_fp(bp, i, tx_desc_mapping),
5759 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5760
5761 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5762 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5763 bnx2x_fp(bp, i, rx_desc_mapping),
5764 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5765
5766 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5767 bnx2x_fp(bp, i, rx_comp_mapping),
5768 sizeof(struct eth_fast_path_rx_cqe) *
5769 NUM_RCQ_BD);
a2fbb9ea 5770
7a9b2557 5771 /* SGE ring */
32626230 5772 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5773 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5774 bnx2x_fp(bp, i, rx_sge_mapping),
5775 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5776 }
5777 /* end of fastpath */
5778
5779 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5780 sizeof(struct host_def_status_block));
5781
5782 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5783 sizeof(struct bnx2x_slowpath));
5784
5785#ifdef BCM_ISCSI
5786 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5787 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5788 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5789 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5790#endif
7a9b2557 5791 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5792
5793#undef BNX2X_PCI_FREE
5794#undef BNX2X_FREE
5795}
5796
5797static int bnx2x_alloc_mem(struct bnx2x *bp)
5798{
5799
5800#define BNX2X_PCI_ALLOC(x, y, size) \
5801 do { \
5802 x = pci_alloc_consistent(bp->pdev, size, y); \
5803 if (x == NULL) \
5804 goto alloc_mem_err; \
5805 memset(x, 0, size); \
5806 } while (0)
5807
5808#define BNX2X_ALLOC(x, size) \
5809 do { \
5810 x = vmalloc(size); \
5811 if (x == NULL) \
5812 goto alloc_mem_err; \
5813 memset(x, 0, size); \
5814 } while (0)
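	/* Note (annotation): each BNX2X_PCI_ALLOC/BNX2X_ALLOC here is paired
	   with a BNX2X_PCI_FREE/BNX2X_FREE in bnx2x_free_mem(); on any
	   failure the alloc_mem_err path below frees everything allocated
	   so far */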
5815
5816 int i;
5817
5818 /* fastpath */
5819 for_each_queue(bp, i) {
5820 bnx2x_fp(bp, i, bp) = bp;
5821
5822 /* Status blocks */
5823 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5824 &bnx2x_fp(bp, i, status_blk_mapping),
5825 sizeof(struct host_status_block) +
5826 sizeof(struct eth_tx_db_data));
5827
5828 bnx2x_fp(bp, i, hw_tx_prods) =
5829 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5830
5831 bnx2x_fp(bp, i, tx_prods_mapping) =
5832 bnx2x_fp(bp, i, status_blk_mapping) +
5833 sizeof(struct host_status_block);
5834
5835 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5836 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5837 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5838 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5839 &bnx2x_fp(bp, i, tx_desc_mapping),
5840 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5841
5842 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5843 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5844 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5845 &bnx2x_fp(bp, i, rx_desc_mapping),
5846 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5847
5848 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5849 &bnx2x_fp(bp, i, rx_comp_mapping),
5850 sizeof(struct eth_fast_path_rx_cqe) *
5851 NUM_RCQ_BD);
5852
5853 /* SGE ring */
5854 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5855 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5856 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5857 &bnx2x_fp(bp, i, rx_sge_mapping),
5858 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5859 }
5860 /* end of fastpath */
5861
5862 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5863 sizeof(struct host_def_status_block));
5864
5865 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5866 sizeof(struct bnx2x_slowpath));
5867
5868#ifdef BCM_ISCSI
5869 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5870
5871 /* Initialize T1 */
5872 for (i = 0; i < 64*1024; i += 64) {
5873 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5874 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5875 }
5876
5877 /* allocate searcher T2 table
5878 we allocate 1/4 of alloc num for T2
5879 (which is not entered into the ILT) */
5880 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5881
5882 /* Initialize T2 */
5883 for (i = 0; i < 16*1024; i += 64)
5884 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5885
c14423fe 5886 /* now fixup the last line in the block to point to the next block */
5887 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5888
5889 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5890 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5891
5892 /* QM queues (128*MAX_CONN) */
5893 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5894#endif
5895
5896 /* Slow path ring */
5897 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5898
5899 return 0;
5900
5901alloc_mem_err:
5902 bnx2x_free_mem(bp);
5903 return -ENOMEM;
5904
5905#undef BNX2X_PCI_ALLOC
5906#undef BNX2X_ALLOC
5907}
5908
5909static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5910{
5911 int i;
5912
5913 for_each_queue(bp, i) {
5914 struct bnx2x_fastpath *fp = &bp->fp[i];
5915
5916 u16 bd_cons = fp->tx_bd_cons;
5917 u16 sw_prod = fp->tx_pkt_prod;
5918 u16 sw_cons = fp->tx_pkt_cons;
5919
5920 while (sw_cons != sw_prod) {
5921 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5922 sw_cons++;
5923 }
5924 }
5925}
5926
5927static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5928{
5929 int i, j;
5930
5931 for_each_queue(bp, j) {
5932 struct bnx2x_fastpath *fp = &bp->fp[j];
5933
5934 for (i = 0; i < NUM_RX_BD; i++) {
5935 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5936 struct sk_buff *skb = rx_buf->skb;
5937
5938 if (skb == NULL)
5939 continue;
5940
5941 pci_unmap_single(bp->pdev,
5942 pci_unmap_addr(rx_buf, mapping),
5943 bp->rx_buf_use_size,
5944 PCI_DMA_FROMDEVICE);
5945
5946 rx_buf->skb = NULL;
5947 dev_kfree_skb(skb);
5948 }
7a9b2557 5949 if (!fp->disable_tpa)
5950 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5951 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5952 ETH_MAX_AGGREGATION_QUEUES_E1H);
5953 }
5954}
5955
5956static void bnx2x_free_skbs(struct bnx2x *bp)
5957{
5958 bnx2x_free_tx_skbs(bp);
5959 bnx2x_free_rx_skbs(bp);
5960}
5961
5962static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5963{
34f80b04 5964 int i, offset = 1;
5965
5966 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5967 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5968 bp->msix_table[0].vector);
5969
5970 for_each_queue(bp, i) {
c14423fe 5971 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5972 "state %x\n", i, bp->msix_table[i + offset].vector,
5973 bnx2x_fp(bp, i, state));
5974
5975 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5976 BNX2X_ERR("IRQ of fp #%d being freed while "
5977 "state != closed\n", i);
a2fbb9ea 5978
34f80b04 5979 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5980 }
5981}
5982
5983static void bnx2x_free_irq(struct bnx2x *bp)
5984{
a2fbb9ea 5985 if (bp->flags & USING_MSIX_FLAG) {
5986 bnx2x_free_msix_irqs(bp);
5987 pci_disable_msix(bp->pdev);
5988 bp->flags &= ~USING_MSIX_FLAG;
5989
5990 } else
5991 free_irq(bp->pdev->irq, bp->dev);
5992}
5993
5994static int bnx2x_enable_msix(struct bnx2x *bp)
5995{
34f80b04 5996 int i, rc, offset;
5997
5998 bp->msix_table[0].entry = 0;
5999 offset = 1;
6000 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6001
6002 for_each_queue(bp, i) {
6003 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6004
6005 bp->msix_table[i + offset].entry = igu_vec;
6006 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6007 "(fastpath #%u)\n", i + offset, igu_vec, i);
6008 }
6009
6010 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6011 bp->num_queues + offset);
6012 if (rc) {
6013 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6014 return -1;
6015 }
6016 bp->flags |= USING_MSIX_FLAG;
6017
6018 return 0;
6019}
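/* Layout note (annotation): MSI-X entry 0 is reserved for the slowpath
   interrupt and entries 1..num_queues map the fastpath queues, each with
   an IGU vector offset by this function's BP_L_ID() */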
6020
6021static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6022{
34f80b04 6023 int i, rc, offset = 1;
a2fbb9ea 6024
6025 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6026 bp->dev->name, bp->dev);
6027 if (rc) {
6028 BNX2X_ERR("request sp irq failed\n");
6029 return -EBUSY;
6030 }
6031
6032 for_each_queue(bp, i) {
34f80b04 6033 rc = request_irq(bp->msix_table[i + offset].vector,
6034 bnx2x_msix_fp_int, 0,
6035 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6036 if (rc) {
6037 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6038 i + offset, -rc);
6039 bnx2x_free_msix_irqs(bp);
6040 return -EBUSY;
6041 }
6042
6043 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6044 }
6045
6046 return 0;
6047}
6048
6049static int bnx2x_req_irq(struct bnx2x *bp)
6050{
34f80b04 6051 int rc;
a2fbb9ea 6052
6053 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6054 bp->dev->name, bp->dev);
6055 if (!rc)
6056 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6057
6058 return rc;
6059}
6060
6061/*
6062 * Init service functions
6063 */
6064
3101c2bc 6065static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6066{
6067 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6068 int port = BP_PORT(bp);
6069
6070 /* CAM allocation
6071 * unicasts 0-31:port0 32-63:port1
6072 * multicast 64-127:port0 128-191:port1
6073 */
6074 config->hdr.length_6b = 2;
6075 config->hdr.offset = port ? 31 : 0;
6076 config->hdr.client_id = BP_CL_ID(bp);
6077 config->hdr.reserved1 = 0;
6078
6079 /* primary MAC */
6080 config->config_table[0].cam_entry.msb_mac_addr =
6081 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6082 config->config_table[0].cam_entry.middle_mac_addr =
6083 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6084 config->config_table[0].cam_entry.lsb_mac_addr =
6085 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6086 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6087 if (set)
6088 config->config_table[0].target_table_entry.flags = 0;
6089 else
6090 CAM_INVALIDATE(config->config_table[0]);
6091 config->config_table[0].target_table_entry.client_id = 0;
6092 config->config_table[0].target_table_entry.vlan_id = 0;
6093
6094 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6095 (set ? "setting" : "clearing"),
6096 config->config_table[0].cam_entry.msb_mac_addr,
6097 config->config_table[0].cam_entry.middle_mac_addr,
6098 config->config_table[0].cam_entry.lsb_mac_addr);
6099
6100 /* broadcast */
6101 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6102 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6103 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6104 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6105 if (set)
6106 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6107 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6108 else
6109 CAM_INVALIDATE(config->config_table[1]);
6110 config->config_table[1].target_table_entry.client_id = 0;
6111 config->config_table[1].target_table_entry.vlan_id = 0;
6112
6113 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6114 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6115 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6116}
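/* Note (annotation): the 'set' argument selects between installing the
   primary and broadcast CAM entries (set = 1) and invalidating them via
   CAM_INVALIDATE (set = 0); the unload path below calls this with 0 */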
6117
3101c2bc 6118static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6119{
6120 struct mac_configuration_cmd_e1h *config =
6121 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6122
3101c2bc 6123 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6124 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6125 return;
6126 }
6127
6128 /* CAM allocation for E1H
6129 * unicasts: by func number
6130 * multicast: 20+FUNC*20, 20 each
6131 */
6132 config->hdr.length_6b = 1;
6133 config->hdr.offset = BP_FUNC(bp);
6134 config->hdr.client_id = BP_CL_ID(bp);
6135 config->hdr.reserved1 = 0;
6136
6137 /* primary MAC */
6138 config->config_table[0].msb_mac_addr =
6139 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6140 config->config_table[0].middle_mac_addr =
6141 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6142 config->config_table[0].lsb_mac_addr =
6143 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6144 config->config_table[0].client_id = BP_L_ID(bp);
6145 config->config_table[0].vlan_id = 0;
6146 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6147 if (set)
6148 config->config_table[0].flags = BP_PORT(bp);
6149 else
6150 config->config_table[0].flags =
6151 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6152
6153 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6154 (set ? "setting" : "clearing"),
6155 config->config_table[0].msb_mac_addr,
6156 config->config_table[0].middle_mac_addr,
6157 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6158
6159 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6160 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6161 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6162}
6163
6164static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6165 int *state_p, int poll)
6166{
6167 /* can take a while if any port is running */
34f80b04 6168 int cnt = 500;
a2fbb9ea 6169
6170 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6171 poll ? "polling" : "waiting", state, idx);
6172
6173 might_sleep();
34f80b04 6174 while (cnt--) {
6175 if (poll) {
6176 bnx2x_rx_int(bp->fp, 10);
6177 /* if index is different from 0
6178 * the reply for some commands will
3101c2bc 6179 * be on the non default queue
6180 */
6181 if (idx)
6182 bnx2x_rx_int(&bp->fp[idx], 10);
6183 }
a2fbb9ea 6184
3101c2bc 6185 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6186 if (*state_p == state)
6187 return 0;
6188
a2fbb9ea 6189 msleep(1);
6190 }
6191
a2fbb9ea 6192 /* timeout! */
6193 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6194 poll ? "polling" : "waiting", state, idx);
6195#ifdef BNX2X_STOP_ON_ERROR
6196 bnx2x_panic();
6197#endif
a2fbb9ea 6198
49d66772 6199 return -EBUSY;
6200}
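/* Usage sketch (annotation): callers post a ramrod and then spin here
   until bnx2x_sp_event() moves *state_p to the expected state, e.g.
   bnx2x_setup_leading() below waits for BNX2X_STATE_OPEN after the
   PORT_SETUP ramrod */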
6201
6202static int bnx2x_setup_leading(struct bnx2x *bp)
6203{
34f80b04 6204 int rc;
a2fbb9ea 6205
c14423fe 6206 /* reset IGU state */
34f80b04 6207 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6208
6209 /* SETUP ramrod */
6210 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6211
6212 /* Wait for completion */
6213 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6214
34f80b04 6215 return rc;
6216}
6217
6218static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6219{
a2fbb9ea 6220 /* reset IGU state */
34f80b04 6221 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6222
228241eb 6223 /* SETUP ramrod */
6224 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6225 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6226
6227 /* Wait for completion */
6228 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6229 &(bp->fp[index].state), 0);
6230}
6231
6232static int bnx2x_poll(struct napi_struct *napi, int budget);
6233static void bnx2x_set_rx_mode(struct net_device *dev);
6234
6235/* must be called with rtnl_lock */
6236static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6237{
228241eb 6238 u32 load_code;
34f80b04 6239 int i, rc;
6240#ifdef BNX2X_STOP_ON_ERROR
6241 if (unlikely(bp->panic))
6242 return -EPERM;
6243#endif
6244
6245 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6246
6247 /* Send LOAD_REQUEST command to MCP
6248 Returns the type of LOAD command:
6249 if it is the first port to be initialized
6250 common blocks should be initialized, otherwise - not
a2fbb9ea 6251 */
34f80b04 6252 if (!BP_NOMCP(bp)) {
6253 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6254 if (!load_code) {
da5a662a 6255 BNX2X_ERR("MCP response failure, aborting\n");
6256 return -EBUSY;
6257 }
34f80b04 6258 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6259 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6260
a2fbb9ea 6261 } else {
6262 int port = BP_PORT(bp);
6263
6264 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6265 load_count[0], load_count[1], load_count[2]);
6266 load_count[0]++;
da5a662a 6267 load_count[1 + port]++;
6268 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6269 load_count[0], load_count[1], load_count[2]);
6270 if (load_count[0] == 1)
6271 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6272 else if (load_count[1 + port] == 1)
6273 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6274 else
6275 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6276 }
6277
6278 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6279 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6280 bp->port.pmf = 1;
6281 else
6282 bp->port.pmf = 0;
6283 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6284
6285 /* if we can't use MSI-X we only need one fp,
6286 * so try to enable MSI-X with the requested number of fp's
6287 * and fall back to INT#A with one fp
6288 */
6289 if (use_inta) {
6290 bp->num_queues = 1;
6291
6292 } else {
6293 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6294 /* user requested number */
6295 bp->num_queues = use_multi;
6296
6297 else if (use_multi)
6298 bp->num_queues = min_t(u32, num_online_cpus(),
6299 BP_MAX_QUEUES(bp));
6300 else
a2fbb9ea 6301 bp->num_queues = 1;
6302
6303 if (bnx2x_enable_msix(bp)) {
6304 /* failed to enable MSI-X */
6305 bp->num_queues = 1;
6306 if (use_multi)
6307 BNX2X_ERR("Multi requested but failed"
6308 " to enable MSI-X\n");
6309 }
6310 }
6311 DP(NETIF_MSG_IFUP,
6312 "set number of queues to %d\n", bp->num_queues);
c14423fe 6313
6314 if (bnx2x_alloc_mem(bp))
6315 return -ENOMEM;
6316
6317 for_each_queue(bp, i)
6318 bnx2x_fp(bp, i, disable_tpa) =
6319 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6320
6321 if (bp->flags & USING_MSIX_FLAG) {
6322 rc = bnx2x_req_msix_irqs(bp);
6323 if (rc) {
6324 pci_disable_msix(bp->pdev);
6325 goto load_error;
6326 }
6327 } else {
6328 bnx2x_ack_int(bp);
6329 rc = bnx2x_req_irq(bp);
6330 if (rc) {
6331 BNX2X_ERR("IRQ request failed, aborting\n");
6332 goto load_error;
6333 }
6334 }
6335
6336 for_each_queue(bp, i)
6337 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6338 bnx2x_poll, 128);
6339
a2fbb9ea 6340 /* Initialize HW */
6341 rc = bnx2x_init_hw(bp, load_code);
6342 if (rc) {
a2fbb9ea 6343 BNX2X_ERR("HW init failed, aborting\n");
d1014634 6344 goto load_int_disable;
6345 }
6346
a2fbb9ea 6347 /* Setup NIC internals and enable interrupts */
471de716 6348 bnx2x_nic_init(bp, load_code);
6349
6350 /* Send LOAD_DONE command to MCP */
34f80b04 6351 if (!BP_NOMCP(bp)) {
6352 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6353 if (!load_code) {
da5a662a 6354 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6355 rc = -EBUSY;
d1014634 6356 goto load_rings_free;
6357 }
6358 }
6359
6360 bnx2x_stats_init(bp);
6361
6362 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6363
6364 /* Enable Rx interrupt handling before sending the ramrod
6365 as it's completed on Rx FP queue */
6366 for_each_queue(bp, i)
6367 napi_enable(&bnx2x_fp(bp, i, napi));
6368
6369 /* Enable interrupt handling */
6370 atomic_set(&bp->intr_sem, 0);
6371
6372 rc = bnx2x_setup_leading(bp);
6373 if (rc) {
da5a662a 6374 BNX2X_ERR("Setup leading failed!\n");
d1014634 6375 goto load_netif_stop;
34f80b04 6376 }
a2fbb9ea 6377
6378 if (CHIP_IS_E1H(bp))
6379 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6380 BNX2X_ERR("!!! mf_cfg function disabled\n");
6381 bp->state = BNX2X_STATE_DISABLED;
6382 }
a2fbb9ea 6383
6384 if (bp->state == BNX2X_STATE_OPEN)
6385 for_each_nondefault_queue(bp, i) {
6386 rc = bnx2x_setup_multi(bp, i);
6387 if (rc)
d1014634 6388 goto load_netif_stop;
34f80b04 6389 }
a2fbb9ea 6390
34f80b04 6391 if (CHIP_IS_E1(bp))
3101c2bc 6392 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6393 else
3101c2bc 6394 bnx2x_set_mac_addr_e1h(bp, 1);
6395
6396 if (bp->port.pmf)
6397 bnx2x_initial_phy_init(bp);
6398
6399 /* Start fast path */
6400 switch (load_mode) {
6401 case LOAD_NORMAL:
6402 /* Tx queue should only be re-enabled */
6403 netif_wake_queue(bp->dev);
6404 bnx2x_set_rx_mode(bp->dev);
6405 break;
6406
6407 case LOAD_OPEN:
a2fbb9ea 6408 netif_start_queue(bp->dev);
34f80b04 6409 bnx2x_set_rx_mode(bp->dev);
6410 if (bp->flags & USING_MSIX_FLAG)
6411 printk(KERN_INFO PFX "%s: using MSI-X\n",
6412 bp->dev->name);
34f80b04 6413 break;
a2fbb9ea 6414
34f80b04 6415 case LOAD_DIAG:
a2fbb9ea 6416 bnx2x_set_rx_mode(bp->dev);
6417 bp->state = BNX2X_STATE_DIAG;
6418 break;
6419
6420 default:
6421 break;
6422 }
6423
6424 if (!bp->port.pmf)
6425 bnx2x__link_status_update(bp);
6426
6427 /* start the timer */
6428 mod_timer(&bp->timer, jiffies + bp->current_interval);
6429
34f80b04 6430
6431 return 0;
6432
d1014634 6433load_netif_stop:
6434 for_each_queue(bp, i)
6435 napi_disable(&bnx2x_fp(bp, i, napi));
d1014634 6436load_rings_free:
6437 /* Free SKBs, SGEs, TPA pool and driver internals */
6438 bnx2x_free_skbs(bp);
6439 for_each_queue(bp, i)
3196a88a 6440 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6441load_int_disable:
6442 bnx2x_int_disable_sync(bp);
6443 /* Release IRQs */
6444 bnx2x_free_irq(bp);
228241eb 6445load_error:
6446 bnx2x_free_mem(bp);
6447
6448 /* TBD we really need to reset the chip
6449 if we want to recover from this */
34f80b04 6450 return rc;
6451}
6452
6453static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6454{
6455 int rc;
6456
c14423fe 6457 /* halt the connection */
6458 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6459 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6460
34f80b04 6461 /* Wait for completion */
a2fbb9ea 6462 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6463 &(bp->fp[index].state), 1);
c14423fe 6464 if (rc) /* timeout */
6465 return rc;
6466
6467 /* delete cfc entry */
6468 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6469
6470 /* Wait for completion */
6471 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6472 &(bp->fp[index].state), 1);
6473 return rc;
6474}
6475
da5a662a 6476static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6477{
49d66772 6478 u16 dsb_sp_prod_idx;
c14423fe 6479 /* if the other port is handling traffic,
a2fbb9ea 6480 this can take a lot of time */
6481 int cnt = 500;
6482 int rc;
6483
6484 might_sleep();
6485
6486 /* Send HALT ramrod */
6487 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6488 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6489
6490 /* Wait for completion */
6491 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6492 &(bp->fp[0].state), 1);
6493 if (rc) /* timeout */
da5a662a 6494 return rc;
a2fbb9ea 6495
49d66772 6496 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6497
228241eb 6498 /* Send PORT_DELETE ramrod */
6499 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6500
49d66772 6501 /* Wait for completion to arrive on default status block
6502 we are going to reset the chip anyway
6503 so there is not much to do if this times out
6504 */
34f80b04 6505 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6506 if (!cnt) {
6507 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6508 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6509 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6510#ifdef BNX2X_STOP_ON_ERROR
6511 bnx2x_panic();
6512#else
6513 rc = -EBUSY;
6514#endif
6515 break;
6516 }
6517 cnt--;
da5a662a 6518 msleep(1);
6519 }
6520 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6521 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6522
6523 return rc;
6524}
6525
6526static void bnx2x_reset_func(struct bnx2x *bp)
6527{
6528 int port = BP_PORT(bp);
6529 int func = BP_FUNC(bp);
6530 int base, i;
6531
6532 /* Configure IGU */
6533 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6534 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6535
6536 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6537
6538 /* Clear ILT */
6539 base = FUNC_ILT_BASE(func);
6540 for (i = base; i < base + ILT_PER_FUNC; i++)
6541 bnx2x_ilt_wr(bp, i, 0);
6542}
6543
6544static void bnx2x_reset_port(struct bnx2x *bp)
6545{
6546 int port = BP_PORT(bp);
6547 u32 val;
6548
6549 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6550
6551 /* Do not rcv packets to BRB */
6552 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6553 /* Do not direct rcv packets that are not for MCP to the BRB */
6554 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6555 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6556
6557 /* Configure AEU */
6558 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6559
6560 msleep(100);
6561 /* Check for BRB port occupancy */
6562 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6563 if (val)
6564 DP(NETIF_MSG_IFDOWN,
33471629 6565 "BRB1 is not empty %d blocks are occupied\n", val);
6566
6567 /* TODO: Close Doorbell port? */
6568}
6569
6570static void bnx2x_reset_common(struct bnx2x *bp)
6571{
6572 /* reset_common */
6573 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6574 0xd3ffff7f);
6575 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6576}
6577
6578static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6579{
6580 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6581 BP_FUNC(bp), reset_code);
6582
6583 switch (reset_code) {
6584 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6585 bnx2x_reset_port(bp);
6586 bnx2x_reset_func(bp);
6587 bnx2x_reset_common(bp);
6588 break;
6589
6590 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6591 bnx2x_reset_port(bp);
6592 bnx2x_reset_func(bp);
6593 break;
6594
6595 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6596 bnx2x_reset_func(bp);
6597 break;
49d66772 6598
6599 default:
6600 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6601 break;
6602 }
6603}
6604
33471629 6605/* must be called with rtnl_lock */
34f80b04 6606static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6607{
da5a662a 6608 int port = BP_PORT(bp);
a2fbb9ea 6609 u32 reset_code = 0;
da5a662a 6610 int i, cnt, rc;
6611
6612 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6613
6614 bp->rx_mode = BNX2X_RX_MODE_NONE;
6615 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6616
6617 if (netif_running(bp->dev)) {
6618 netif_tx_disable(bp->dev);
6619 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6620 }
6621
6622 del_timer_sync(&bp->timer);
6623 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6624 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6625 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6626
da5a662a 6627 /* Wait until tx fast path tasks complete */
6628 for_each_queue(bp, i) {
6629 struct bnx2x_fastpath *fp = &bp->fp[i];
6630
6631 cnt = 1000;
6632 smp_rmb();
6633 while (BNX2X_HAS_TX_WORK(fp)) {
6634
6635 if (!netif_running(bp->dev))
6636 bnx2x_tx_int(fp, 1000);
6637
6638 if (!cnt) {
6639 BNX2X_ERR("timeout waiting for queue[%d]\n",
6640 i);
6641#ifdef BNX2X_STOP_ON_ERROR
6642 bnx2x_panic();
6643 return -EBUSY;
6644#else
6645 break;
6646#endif
6647 }
6648 cnt--;
da5a662a 6649 msleep(1);
6650 smp_rmb();
6651 }
228241eb 6652 }
a2fbb9ea 6653
6654 /* Give HW time to discard old tx messages */
6655 msleep(1);
a2fbb9ea 6656
6657 for_each_queue(bp, i)
6658 napi_disable(&bnx2x_fp(bp, i, napi));
6659 /* Disable interrupts after Tx and Rx are disabled on stack level */
6660 bnx2x_int_disable_sync(bp);
a2fbb9ea 6661
6662 /* Release IRQs */
6663 bnx2x_free_irq(bp);
6664
6665 if (unload_mode == UNLOAD_NORMAL)
6666 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6667
6668 else if (bp->flags & NO_WOL_FLAG) {
a2fbb9ea 6669 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6670 if (CHIP_IS_E1H(bp))
6671 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
228241eb 6672
6673 } else if (bp->wol) {
6674 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6675 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6676 u32 val;
6677 /* The mac address is written to entries 1-4 to
6678 preserve entry 0 which is used by the PMF */
6679 u8 entry = (BP_E1HVN(bp) + 1)*8;
6680
a2fbb9ea 6681 val = (mac_addr[0] << 8) | mac_addr[1];
3196a88a 6682 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6683
6684 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6685 (mac_addr[4] << 8) | mac_addr[5];
3196a88a 6686 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6687
6688 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6689
6690 } else
6691 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6692
6693 if (CHIP_IS_E1(bp)) {
6694 struct mac_configuration_cmd *config =
6695 bnx2x_sp(bp, mcast_config);
6696
6697 bnx2x_set_mac_addr_e1(bp, 0);
6698
6699 for (i = 0; i < config->hdr.length_6b; i++)
6700 CAM_INVALIDATE(config->config_table[i]);
6701
6702 config->hdr.length_6b = i;
6703 if (CHIP_REV_IS_SLOW(bp))
6704 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6705 else
6706 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6707 config->hdr.client_id = BP_CL_ID(bp);
6708 config->hdr.reserved1 = 0;
6709
6710 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6711 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6712 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6713
6714 } else { /* E1H */
6715 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717 for (i = 0; i < MC_HASH_SIZE; i++)
6718 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719 }
6720
6721 if (CHIP_IS_E1H(bp))
6722 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6723
6724 /* Close multi and leading connections
6725 Completions for ramrods are collected in a synchronous way */
6726 for_each_nondefault_queue(bp, i)
6727 if (bnx2x_stop_multi(bp, i))
228241eb 6728 goto unload_error;
a2fbb9ea 6729
6730 rc = bnx2x_stop_leading(bp);
6731 if (rc) {
34f80b04 6732 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6733#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6734 return -EBUSY;
6735#else
6736 goto unload_error;
34f80b04 6737#endif
6738 }
6739
6740unload_error:
34f80b04 6741 if (!BP_NOMCP(bp))
228241eb 6742 reset_code = bnx2x_fw_command(bp, reset_code);
6743 else {
6744 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6745 load_count[0], load_count[1], load_count[2]);
6746 load_count[0]--;
da5a662a 6747 load_count[1 + port]--;
6748 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6749 load_count[0], load_count[1], load_count[2]);
6750 if (load_count[0] == 0)
6751 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6752 else if (load_count[1 + port] == 0)
6753 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6754 else
6755 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6756 }
a2fbb9ea 6757
6758 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6759 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6760 bnx2x__link_reset(bp);
6761
6762 /* Reset the chip */
228241eb 6763 bnx2x_reset_chip(bp, reset_code);
6764
6765 /* Report UNLOAD_DONE to MCP */
34f80b04 6766 if (!BP_NOMCP(bp))
6767 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6768
7a9b2557 6769 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6770 bnx2x_free_skbs(bp);
7a9b2557 6771 for_each_queue(bp, i)
3196a88a 6772 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
a2fbb9ea
ET
6773 bnx2x_free_mem(bp);
6774
6775 bp->state = BNX2X_STATE_CLOSED;
228241eb 6776
a2fbb9ea
ET
6777 netif_carrier_off(bp->dev);
6778
6779 return 0;
6780}
6781
34f80b04
EG
6782static void bnx2x_reset_task(struct work_struct *work)
6783{
6784 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6785
6786#ifdef BNX2X_STOP_ON_ERROR
6787 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6788 " so reset not done to allow debug dump,\n"
6789 KERN_ERR " you will need to reboot when done\n");
6790 return;
6791#endif
6792
6793 rtnl_lock();
6794
6795 if (!netif_running(bp->dev))
6796 goto reset_task_exit;
6797
6798 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6799 bnx2x_nic_load(bp, LOAD_NORMAL);
6800
6801reset_task_exit:
6802 rtnl_unlock();
6803}
6804
a2fbb9ea
ET
6805/* end of nic load/unload */
6806
6807/* ethtool_ops */
6808
6809/*
6810 * Init service functions
6811 */
6812
34f80b04
EG
6813static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6814{
6815 u32 val;
6816
6817 /* Check if there is any driver already loaded */
6818 val = REG_RD(bp, MISC_REG_UNPREPARED);
6819 if (val == 0x1) {
 6820 /* Check if it is the UNDI driver:
 6821 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
6822 */
4a37fb66 6823 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04 6824 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
76b190c5
EG
6825 if (val == 0x7)
6826 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6827 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6828
34f80b04
EG
6829 if (val == 0x7) {
6830 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6831 /* save our func */
34f80b04 6832 int func = BP_FUNC(bp);
da5a662a
VZ
6833 u32 swap_en;
6834 u32 swap_val;
34f80b04
EG
6835
6836 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6837
 6838 /* try to unload UNDI on port 0 */
6839 bp->func = 0;
da5a662a
VZ
6840 bp->fw_seq =
6841 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6842 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6843 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6844
6845 /* if UNDI is loaded on the other port */
6846 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6847
da5a662a
VZ
6848 /* send "DONE" for previous unload */
6849 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6850
6851 /* unload UNDI on port 1 */
34f80b04 6852 bp->func = 1;
da5a662a
VZ
6853 bp->fw_seq =
6854 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6855 DRV_MSG_SEQ_NUMBER_MASK);
6856 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6857
6858 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6859 }
6860
da5a662a
VZ
6861 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6862 HC_REG_CONFIG_0), 0x1000);
6863
6864 /* close input traffic and wait for it */
6865 /* Do not rcv packets to BRB */
6866 REG_WR(bp,
6867 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6868 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6869 /* Do not direct rcv packets that are not for MCP to
6870 * the BRB */
6871 REG_WR(bp,
6872 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6873 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6874 /* clear AEU */
6875 REG_WR(bp,
6876 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6877 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6878 msleep(10);
6879
6880 /* save NIG port swap info */
6881 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6882 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
6883 /* reset device */
6884 REG_WR(bp,
6885 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6886 0xd3ffffff);
34f80b04
EG
6887 REG_WR(bp,
6888 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6889 0x1403);
da5a662a
VZ
6890 /* take the NIG out of reset and restore swap values */
6891 REG_WR(bp,
6892 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6893 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6894 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6895 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6896
6897 /* send unload done to the MCP */
6898 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6899
6900 /* restore our func and fw_seq */
6901 bp->func = func;
6902 bp->fw_seq =
6903 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6904 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
6905 }
6906 }
6907}
6908
6909static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6910{
6911 u32 val, val2, val3, val4, id;
72ce58c3 6912 u16 pmc;
34f80b04
EG
6913
6914 /* Get the chip revision id and number. */
6915 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6916 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6917 id = ((val & 0xffff) << 16);
6918 val = REG_RD(bp, MISC_REG_CHIP_REV);
6919 id |= ((val & 0xf) << 12);
6920 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6921 id |= ((val & 0xff) << 4);
 6922 val = REG_RD(bp, MISC_REG_BOND_ID);
6923 id |= (val & 0xf);
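/* Assembled per the layout above: id = (chip_num << 16) | (rev << 12) |
 * (metal << 4) | bond_id; e.g. a hypothetical chip_num 0x164e with
 * rev/metal/bond_id all zero yields chip_id 0x164e0000. */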
6924 bp->common.chip_id = id;
6925 bp->link_params.chip_id = bp->common.chip_id;
6926 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6927
6928 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6929 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6930 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6931 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6932 bp->common.flash_size, bp->common.flash_size);
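/* The strap field is a power-of-two multiplier: the reported size is
 * NVRAM_1MB_SIZE (1 Mbit, per its definition) shifted left by the field
 * value, so each increment doubles the flash size. */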
6933
6934 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6935 bp->link_params.shmem_base = bp->common.shmem_base;
6936 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6937
6938 if (!bp->common.shmem_base ||
6939 (bp->common.shmem_base < 0xA0000) ||
6940 (bp->common.shmem_base >= 0xC0000)) {
6941 BNX2X_DEV_INFO("MCP not active\n");
6942 bp->flags |= NO_MCP_FLAG;
6943 return;
6944 }
6945
6946 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6947 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6948 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6949 BNX2X_ERR("BAD MCP validity signature\n");
6950
6951 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6952 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6953
6954 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6955 bp->common.hw_config, bp->common.board);
6956
6957 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6958 SHARED_HW_CFG_LED_MODE_MASK) >>
6959 SHARED_HW_CFG_LED_MODE_SHIFT);
6960
6961 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6962 bp->common.bc_ver = val;
6963 BNX2X_DEV_INFO("bc_ver %X\n", val);
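/* bc_ver packs the bootcode version one byte per component, so the
 * required BNX2X_BC_VER 0x040200 reads as 4.2.0; the ethtool drvinfo
 * handler below decodes the same three bytes. */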
6964 if (val < BNX2X_BC_VER) {
 6965 /* for now only warn;
 6966 * later we might need to enforce this */
6967 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6968 " please upgrade BC\n", BNX2X_BC_VER, val);
6969 }
72ce58c3
EG
6970
6971 if (BP_E1HVN(bp) == 0) {
6972 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6973 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6974 } else {
6975 /* no WOL capability for E1HVN != 0 */
6976 bp->flags |= NO_WOL_FLAG;
6977 }
6978 BNX2X_DEV_INFO("%sWoL capable\n",
6979 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
6980
6981 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6982 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6983 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6984 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6985
6986 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6987 val, val2, val3, val4);
6988}
6989
6990static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6991 u32 switch_cfg)
a2fbb9ea 6992{
34f80b04 6993 int port = BP_PORT(bp);
a2fbb9ea
ET
6994 u32 ext_phy_type;
6995
a2fbb9ea
ET
6996 switch (switch_cfg) {
6997 case SWITCH_CFG_1G:
6998 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6999
c18487ee
YR
7000 ext_phy_type =
7001 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7002 switch (ext_phy_type) {
7003 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7004 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7005 ext_phy_type);
7006
34f80b04
EG
7007 bp->port.supported |= (SUPPORTED_10baseT_Half |
7008 SUPPORTED_10baseT_Full |
7009 SUPPORTED_100baseT_Half |
7010 SUPPORTED_100baseT_Full |
7011 SUPPORTED_1000baseT_Full |
7012 SUPPORTED_2500baseX_Full |
7013 SUPPORTED_TP |
7014 SUPPORTED_FIBRE |
7015 SUPPORTED_Autoneg |
7016 SUPPORTED_Pause |
7017 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7018 break;
7019
7020 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7021 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7022 ext_phy_type);
7023
34f80b04
EG
7024 bp->port.supported |= (SUPPORTED_10baseT_Half |
7025 SUPPORTED_10baseT_Full |
7026 SUPPORTED_100baseT_Half |
7027 SUPPORTED_100baseT_Full |
7028 SUPPORTED_1000baseT_Full |
7029 SUPPORTED_TP |
7030 SUPPORTED_FIBRE |
7031 SUPPORTED_Autoneg |
7032 SUPPORTED_Pause |
7033 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7034 break;
7035
7036 default:
7037 BNX2X_ERR("NVRAM config error. "
7038 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7039 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7040 return;
7041 }
7042
34f80b04
EG
7043 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7044 port*0x10);
7045 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7046 break;
7047
7048 case SWITCH_CFG_10G:
7049 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7050
c18487ee
YR
7051 ext_phy_type =
7052 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7053 switch (ext_phy_type) {
7054 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7055 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7056 ext_phy_type);
7057
34f80b04
EG
7058 bp->port.supported |= (SUPPORTED_10baseT_Half |
7059 SUPPORTED_10baseT_Full |
7060 SUPPORTED_100baseT_Half |
7061 SUPPORTED_100baseT_Full |
7062 SUPPORTED_1000baseT_Full |
7063 SUPPORTED_2500baseX_Full |
7064 SUPPORTED_10000baseT_Full |
7065 SUPPORTED_TP |
7066 SUPPORTED_FIBRE |
7067 SUPPORTED_Autoneg |
7068 SUPPORTED_Pause |
7069 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7070 break;
7071
7072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7073 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7074 ext_phy_type);
f1410647 7075
34f80b04
EG
7076 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7077 SUPPORTED_FIBRE |
7078 SUPPORTED_Pause |
7079 SUPPORTED_Asym_Pause);
f1410647
ET
7080 break;
7081
a2fbb9ea 7082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7083 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7084 ext_phy_type);
7085
34f80b04
EG
7086 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7087 SUPPORTED_1000baseT_Full |
7088 SUPPORTED_FIBRE |
7089 SUPPORTED_Pause |
7090 SUPPORTED_Asym_Pause);
f1410647
ET
7091 break;
7092
7093 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7094 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7095 ext_phy_type);
7096
34f80b04
EG
7097 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7098 SUPPORTED_1000baseT_Full |
7099 SUPPORTED_FIBRE |
7100 SUPPORTED_Autoneg |
7101 SUPPORTED_Pause |
7102 SUPPORTED_Asym_Pause);
f1410647
ET
7103 break;
7104
c18487ee
YR
7105 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7106 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7107 ext_phy_type);
7108
34f80b04
EG
7109 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7110 SUPPORTED_2500baseX_Full |
7111 SUPPORTED_1000baseT_Full |
7112 SUPPORTED_FIBRE |
7113 SUPPORTED_Autoneg |
7114 SUPPORTED_Pause |
7115 SUPPORTED_Asym_Pause);
c18487ee
YR
7116 break;
7117
f1410647
ET
7118 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7119 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7120 ext_phy_type);
7121
34f80b04
EG
7122 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7123 SUPPORTED_TP |
7124 SUPPORTED_Autoneg |
7125 SUPPORTED_Pause |
7126 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7127 break;
7128
c18487ee
YR
7129 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7130 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7131 bp->link_params.ext_phy_config);
7132 break;
7133
a2fbb9ea
ET
7134 default:
7135 BNX2X_ERR("NVRAM config error. "
7136 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7137 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7138 return;
7139 }
7140
34f80b04
EG
7141 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7142 port*0x18);
7143 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7144
a2fbb9ea
ET
7145 break;
7146
7147 default:
7148 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7149 bp->port.link_config);
a2fbb9ea
ET
7150 return;
7151 }
34f80b04 7152 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7153
7154 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7155 if (!(bp->link_params.speed_cap_mask &
7156 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7157 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7158
c18487ee
YR
7159 if (!(bp->link_params.speed_cap_mask &
7160 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7161 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7162
c18487ee
YR
7163 if (!(bp->link_params.speed_cap_mask &
7164 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7165 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7166
c18487ee
YR
7167 if (!(bp->link_params.speed_cap_mask &
7168 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7169 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7170
c18487ee
YR
7171 if (!(bp->link_params.speed_cap_mask &
7172 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7173 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7174 SUPPORTED_1000baseT_Full);
a2fbb9ea 7175
c18487ee
YR
7176 if (!(bp->link_params.speed_cap_mask &
7177 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7178 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7179
c18487ee
YR
7180 if (!(bp->link_params.speed_cap_mask &
7181 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7182 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7183
34f80b04 7184 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7185}
7186
34f80b04 7187static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7188{
c18487ee 7189 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7190
34f80b04 7191 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7192 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7193 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7194 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7195 bp->port.advertising = bp->port.supported;
a2fbb9ea 7196 } else {
c18487ee
YR
7197 u32 ext_phy_type =
7198 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7199
7200 if ((ext_phy_type ==
7201 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7202 (ext_phy_type ==
7203 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7204 /* force 10G, no AN */
c18487ee 7205 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7206 bp->port.advertising =
a2fbb9ea
ET
7207 (ADVERTISED_10000baseT_Full |
7208 ADVERTISED_FIBRE);
7209 break;
7210 }
7211 BNX2X_ERR("NVRAM config error. "
7212 "Invalid link_config 0x%x"
7213 " Autoneg not supported\n",
34f80b04 7214 bp->port.link_config);
a2fbb9ea
ET
7215 return;
7216 }
7217 break;
7218
7219 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7220 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7221 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7222 bp->port.advertising = (ADVERTISED_10baseT_Full |
7223 ADVERTISED_TP);
a2fbb9ea
ET
7224 } else {
7225 BNX2X_ERR("NVRAM config error. "
7226 "Invalid link_config 0x%x"
7227 " speed_cap_mask 0x%x\n",
34f80b04 7228 bp->port.link_config,
c18487ee 7229 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7230 return;
7231 }
7232 break;
7233
7234 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7235 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7236 bp->link_params.req_line_speed = SPEED_10;
7237 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7238 bp->port.advertising = (ADVERTISED_10baseT_Half |
7239 ADVERTISED_TP);
a2fbb9ea
ET
7240 } else {
7241 BNX2X_ERR("NVRAM config error. "
7242 "Invalid link_config 0x%x"
7243 " speed_cap_mask 0x%x\n",
34f80b04 7244 bp->port.link_config,
c18487ee 7245 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7246 return;
7247 }
7248 break;
7249
7250 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7251 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7252 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7253 bp->port.advertising = (ADVERTISED_100baseT_Full |
7254 ADVERTISED_TP);
a2fbb9ea
ET
7255 } else {
7256 BNX2X_ERR("NVRAM config error. "
7257 "Invalid link_config 0x%x"
7258 " speed_cap_mask 0x%x\n",
34f80b04 7259 bp->port.link_config,
c18487ee 7260 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7261 return;
7262 }
7263 break;
7264
7265 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7266 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7267 bp->link_params.req_line_speed = SPEED_100;
7268 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7269 bp->port.advertising = (ADVERTISED_100baseT_Half |
7270 ADVERTISED_TP);
a2fbb9ea
ET
7271 } else {
7272 BNX2X_ERR("NVRAM config error. "
7273 "Invalid link_config 0x%x"
7274 " speed_cap_mask 0x%x\n",
34f80b04 7275 bp->port.link_config,
c18487ee 7276 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7277 return;
7278 }
7279 break;
7280
7281 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7282 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7283 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7284 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7285 ADVERTISED_TP);
a2fbb9ea
ET
7286 } else {
7287 BNX2X_ERR("NVRAM config error. "
7288 "Invalid link_config 0x%x"
7289 " speed_cap_mask 0x%x\n",
34f80b04 7290 bp->port.link_config,
c18487ee 7291 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7292 return;
7293 }
7294 break;
7295
7296 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7297 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7298 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7299 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7300 ADVERTISED_TP);
a2fbb9ea
ET
7301 } else {
7302 BNX2X_ERR("NVRAM config error. "
7303 "Invalid link_config 0x%x"
7304 " speed_cap_mask 0x%x\n",
34f80b04 7305 bp->port.link_config,
c18487ee 7306 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7307 return;
7308 }
7309 break;
7310
7311 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7312 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7313 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7314 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7315 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7316 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7317 ADVERTISED_FIBRE);
a2fbb9ea
ET
7318 } else {
7319 BNX2X_ERR("NVRAM config error. "
7320 "Invalid link_config 0x%x"
7321 " speed_cap_mask 0x%x\n",
34f80b04 7322 bp->port.link_config,
c18487ee 7323 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7324 return;
7325 }
7326 break;
7327
7328 default:
7329 BNX2X_ERR("NVRAM config error. "
7330 "BAD link speed link_config 0x%x\n",
34f80b04 7331 bp->port.link_config);
c18487ee 7332 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7333 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7334 break;
7335 }
a2fbb9ea 7336
34f80b04
EG
7337 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7338 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 7339 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
4ab84d45 7340 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 7341 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7342
c18487ee 7343 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7344 " advertising 0x%x\n",
c18487ee
YR
7345 bp->link_params.req_line_speed,
7346 bp->link_params.req_duplex,
34f80b04 7347 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7348}
7349
34f80b04 7350static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7351{
34f80b04
EG
7352 int port = BP_PORT(bp);
7353 u32 val, val2;
a2fbb9ea 7354
c18487ee 7355 bp->link_params.bp = bp;
34f80b04 7356 bp->link_params.port = port;
c18487ee 7357
c18487ee 7358 bp->link_params.serdes_config =
f1410647 7359 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7360 bp->link_params.lane_config =
a2fbb9ea 7361 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7362 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7363 SHMEM_RD(bp,
7364 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7365 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7366 SHMEM_RD(bp,
7367 dev_info.port_hw_config[port].speed_capability_mask);
7368
34f80b04 7369 bp->port.link_config =
a2fbb9ea
ET
7370 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7371
34f80b04
EG
7372 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7373 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7374 " link_config 0x%08x\n",
c18487ee
YR
7375 bp->link_params.serdes_config,
7376 bp->link_params.lane_config,
7377 bp->link_params.ext_phy_config,
34f80b04 7378 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7379
34f80b04 7380 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7381 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7382 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7383
7384 bnx2x_link_settings_requested(bp);
7385
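/* The port MAC is split across two shmem words: mac_upper carries bytes
 * 0-1 in its low 16 bits and mac_lower bytes 2-5, most-significant byte
 * first, hence the shifts below. */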
7386 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7387 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7388 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7389 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7390 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7391 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7392 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7393 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7394 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7395 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7396}
7397
7398static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7399{
7400 int func = BP_FUNC(bp);
7401 u32 val, val2;
7402 int rc = 0;
a2fbb9ea 7403
34f80b04 7404 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7405
34f80b04
EG
7406 bp->e1hov = 0;
7407 bp->e1hmf = 0;
7408 if (CHIP_IS_E1H(bp)) {
7409 bp->mf_config =
7410 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7411
3196a88a
EG
7412 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7413 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7414 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7415
34f80b04
EG
7416 bp->e1hov = val;
7417 bp->e1hmf = 1;
7418 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7419 "(0x%04x)\n",
7420 func, bp->e1hov, bp->e1hov);
7421 } else {
7422 BNX2X_DEV_INFO("Single function mode\n");
7423 if (BP_E1HVN(bp)) {
7424 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7425 " aborting\n", func);
7426 rc = -EPERM;
7427 }
7428 }
7429 }
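/* In short: a non-default E1HOV (outer VLAN) tag in the function's MF
 * config selects multi-function mode; with the default tag only VN 0 is
 * a valid single-function configuration. */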
a2fbb9ea 7430
34f80b04
EG
7431 if (!BP_NOMCP(bp)) {
7432 bnx2x_get_port_hwinfo(bp);
7433
7434 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7435 DRV_MSG_SEQ_NUMBER_MASK);
7436 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7437 }
7438
7439 if (IS_E1HMF(bp)) {
7440 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7441 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7442 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7443 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7444 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7445 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7446 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7447 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7448 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7449 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7450 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7451 ETH_ALEN);
7452 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7453 ETH_ALEN);
a2fbb9ea 7454 }
34f80b04
EG
7455
7456 return rc;
a2fbb9ea
ET
7457 }
7458
34f80b04
EG
7459 if (BP_NOMCP(bp)) {
7460 /* only supposed to happen on emulation/FPGA */
33471629 7461 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7462 random_ether_addr(bp->dev->dev_addr);
7463 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7464 }
a2fbb9ea 7465
34f80b04
EG
7466 return rc;
7467}
7468
7469static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7470{
7471 int func = BP_FUNC(bp);
7472 int rc;
7473
da5a662a
VZ
7474 /* Disable interrupt handling until HW is initialized */
7475 atomic_set(&bp->intr_sem, 1);
7476
34f80b04 7477 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7478
34f80b04
EG
7479 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7480 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7481
7482 rc = bnx2x_get_hwinfo(bp);
7483
7484 /* need to reset chip if undi was active */
7485 if (!BP_NOMCP(bp))
7486 bnx2x_undi_unload(bp);
7487
7488 if (CHIP_REV_IS_FPGA(bp))
7489 printk(KERN_ERR PFX "FPGA detected\n");
7490
7491 if (BP_NOMCP(bp) && (func == 0))
7492 printk(KERN_ERR PFX
7493 "MCP disabled, must load devices in order!\n");
7494
7a9b2557
VZ
7495 /* Set TPA flags */
7496 if (disable_tpa) {
7497 bp->flags &= ~TPA_ENABLE_FLAG;
7498 bp->dev->features &= ~NETIF_F_LRO;
7499 } else {
7500 bp->flags |= TPA_ENABLE_FLAG;
7501 bp->dev->features |= NETIF_F_LRO;
7502 }
7503
7504
34f80b04
EG
7505 bp->tx_ring_size = MAX_TX_AVAIL;
7506 bp->rx_ring_size = MAX_RX_AVAIL;
7507
7508 bp->rx_csum = 1;
7509 bp->rx_offset = 0;
7510
7511 bp->tx_ticks = 50;
7512 bp->rx_ticks = 25;
7513
34f80b04
EG
7514 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7515 bp->current_interval = (poll ? poll : bp->timer_interval);
7516
7517 init_timer(&bp->timer);
7518 bp->timer.expires = jiffies + bp->current_interval;
7519 bp->timer.data = (unsigned long) bp;
7520 bp->timer.function = bnx2x_timer;
7521
7522 return rc;
a2fbb9ea
ET
7523}
7524
7525/*
7526 * ethtool service functions
7527 */
7528
7529/* All ethtool functions called with rtnl_lock */
7530
7531static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7532{
7533 struct bnx2x *bp = netdev_priv(dev);
7534
34f80b04
EG
7535 cmd->supported = bp->port.supported;
7536 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7537
7538 if (netif_carrier_ok(dev)) {
c18487ee
YR
7539 cmd->speed = bp->link_vars.line_speed;
7540 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7541 } else {
c18487ee
YR
7542 cmd->speed = bp->link_params.req_line_speed;
7543 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7544 }
34f80b04
EG
7545 if (IS_E1HMF(bp)) {
7546 u16 vn_max_rate;
7547
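/* The MF max-bandwidth field is in units of 100 Mbps (hence the
 * multiplication below); the reported link speed is capped by the
 * per-function limit. */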
7548 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7549 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7550 if (vn_max_rate < cmd->speed)
7551 cmd->speed = vn_max_rate;
7552 }
a2fbb9ea 7553
c18487ee
YR
7554 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7555 u32 ext_phy_type =
7556 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7557
7558 switch (ext_phy_type) {
7559 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7560 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7561 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7562 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7563 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7564 cmd->port = PORT_FIBRE;
7565 break;
7566
7567 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7568 cmd->port = PORT_TP;
7569 break;
7570
c18487ee
YR
7571 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7572 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7573 bp->link_params.ext_phy_config);
7574 break;
7575
f1410647
ET
7576 default:
7577 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7578 bp->link_params.ext_phy_config);
7579 break;
f1410647
ET
7580 }
7581 } else
a2fbb9ea 7582 cmd->port = PORT_TP;
a2fbb9ea 7583
34f80b04 7584 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7585 cmd->transceiver = XCVR_INTERNAL;
7586
c18487ee 7587 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7588 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7589 else
a2fbb9ea 7590 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7591
7592 cmd->maxtxpkt = 0;
7593 cmd->maxrxpkt = 0;
7594
7595 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7596 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7597 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7598 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7599 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7600 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7601 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7602
7603 return 0;
7604}
7605
7606static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7607{
7608 struct bnx2x *bp = netdev_priv(dev);
7609 u32 advertising;
7610
34f80b04
EG
7611 if (IS_E1HMF(bp))
7612 return 0;
7613
a2fbb9ea
ET
7614 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7615 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7616 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7617 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7618 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7619 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7620 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7621
a2fbb9ea 7622 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7623 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7624 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7625 return -EINVAL;
f1410647 7626 }
a2fbb9ea
ET
7627
7628 /* advertise the requested speed and duplex if supported */
34f80b04 7629 cmd->advertising &= bp->port.supported;
a2fbb9ea 7630
c18487ee
YR
7631 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7632 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7633 bp->port.advertising |= (ADVERTISED_Autoneg |
7634 cmd->advertising);
a2fbb9ea
ET
7635
7636 } else { /* forced speed */
7637 /* advertise the requested speed and duplex if supported */
7638 switch (cmd->speed) {
7639 case SPEED_10:
7640 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7641 if (!(bp->port.supported &
f1410647
ET
7642 SUPPORTED_10baseT_Full)) {
7643 DP(NETIF_MSG_LINK,
7644 "10M full not supported\n");
a2fbb9ea 7645 return -EINVAL;
f1410647 7646 }
a2fbb9ea
ET
7647
7648 advertising = (ADVERTISED_10baseT_Full |
7649 ADVERTISED_TP);
7650 } else {
34f80b04 7651 if (!(bp->port.supported &
f1410647
ET
7652 SUPPORTED_10baseT_Half)) {
7653 DP(NETIF_MSG_LINK,
7654 "10M half not supported\n");
a2fbb9ea 7655 return -EINVAL;
f1410647 7656 }
a2fbb9ea
ET
7657
7658 advertising = (ADVERTISED_10baseT_Half |
7659 ADVERTISED_TP);
7660 }
7661 break;
7662
7663 case SPEED_100:
7664 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7665 if (!(bp->port.supported &
f1410647
ET
7666 SUPPORTED_100baseT_Full)) {
7667 DP(NETIF_MSG_LINK,
7668 "100M full not supported\n");
a2fbb9ea 7669 return -EINVAL;
f1410647 7670 }
a2fbb9ea
ET
7671
7672 advertising = (ADVERTISED_100baseT_Full |
7673 ADVERTISED_TP);
7674 } else {
34f80b04 7675 if (!(bp->port.supported &
f1410647
ET
7676 SUPPORTED_100baseT_Half)) {
7677 DP(NETIF_MSG_LINK,
7678 "100M half not supported\n");
a2fbb9ea 7679 return -EINVAL;
f1410647 7680 }
a2fbb9ea
ET
7681
7682 advertising = (ADVERTISED_100baseT_Half |
7683 ADVERTISED_TP);
7684 }
7685 break;
7686
7687 case SPEED_1000:
f1410647
ET
7688 if (cmd->duplex != DUPLEX_FULL) {
7689 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7690 return -EINVAL;
f1410647 7691 }
a2fbb9ea 7692
34f80b04 7693 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7694 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7695 return -EINVAL;
f1410647 7696 }
a2fbb9ea
ET
7697
7698 advertising = (ADVERTISED_1000baseT_Full |
7699 ADVERTISED_TP);
7700 break;
7701
7702 case SPEED_2500:
f1410647
ET
7703 if (cmd->duplex != DUPLEX_FULL) {
7704 DP(NETIF_MSG_LINK,
7705 "2.5G half not supported\n");
a2fbb9ea 7706 return -EINVAL;
f1410647 7707 }
a2fbb9ea 7708
34f80b04 7709 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7710 DP(NETIF_MSG_LINK,
7711 "2.5G full not supported\n");
a2fbb9ea 7712 return -EINVAL;
f1410647 7713 }
a2fbb9ea 7714
f1410647 7715 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7716 ADVERTISED_TP);
7717 break;
7718
7719 case SPEED_10000:
f1410647
ET
7720 if (cmd->duplex != DUPLEX_FULL) {
7721 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7722 return -EINVAL;
f1410647 7723 }
a2fbb9ea 7724
34f80b04 7725 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7726 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7727 return -EINVAL;
f1410647 7728 }
a2fbb9ea
ET
7729
7730 advertising = (ADVERTISED_10000baseT_Full |
7731 ADVERTISED_FIBRE);
7732 break;
7733
7734 default:
f1410647 7735 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
7736 return -EINVAL;
7737 }
7738
c18487ee
YR
7739 bp->link_params.req_line_speed = cmd->speed;
7740 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7741 bp->port.advertising = advertising;
a2fbb9ea
ET
7742 }
7743
c18487ee 7744 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7745 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7746 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7747 bp->port.advertising);
a2fbb9ea 7748
34f80b04 7749 if (netif_running(dev)) {
bb2a0f7a 7750 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7751 bnx2x_link_set(bp);
7752 }
a2fbb9ea
ET
7753
7754 return 0;
7755}
7756
c18487ee
YR
7757#define PHY_FW_VER_LEN 10
7758
a2fbb9ea
ET
7759static void bnx2x_get_drvinfo(struct net_device *dev,
7760 struct ethtool_drvinfo *info)
7761{
7762 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7763 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
7764
7765 strcpy(info->driver, DRV_MODULE_NAME);
7766 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
7767
7768 phy_fw_ver[0] = '\0';
34f80b04 7769 if (bp->port.pmf) {
4a37fb66 7770 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
7771 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7772 (bp->state != BNX2X_STATE_CLOSED),
7773 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7774 bnx2x_release_phy_lock(bp);
34f80b04 7775 }
c18487ee 7776
f0e53a84
EG
7777 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7778 (bp->common.bc_ver & 0xff0000) >> 16,
7779 (bp->common.bc_ver & 0xff00) >> 8,
7780 (bp->common.bc_ver & 0xff),
7781 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
7782 strcpy(info->bus_info, pci_name(bp->pdev));
7783 info->n_stats = BNX2X_NUM_STATS;
7784 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7785 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
7786 info->regdump_len = 0;
7787}
7788
7789static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7790{
7791 struct bnx2x *bp = netdev_priv(dev);
7792
7793 if (bp->flags & NO_WOL_FLAG) {
7794 wol->supported = 0;
7795 wol->wolopts = 0;
7796 } else {
7797 wol->supported = WAKE_MAGIC;
7798 if (bp->wol)
7799 wol->wolopts = WAKE_MAGIC;
7800 else
7801 wol->wolopts = 0;
7802 }
7803 memset(&wol->sopass, 0, sizeof(wol->sopass));
7804}
7805
7806static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7807{
7808 struct bnx2x *bp = netdev_priv(dev);
7809
7810 if (wol->wolopts & ~WAKE_MAGIC)
7811 return -EINVAL;
7812
7813 if (wol->wolopts & WAKE_MAGIC) {
7814 if (bp->flags & NO_WOL_FLAG)
7815 return -EINVAL;
7816
7817 bp->wol = 1;
34f80b04 7818 } else
a2fbb9ea 7819 bp->wol = 0;
34f80b04 7820
a2fbb9ea
ET
7821 return 0;
7822}
7823
7824static u32 bnx2x_get_msglevel(struct net_device *dev)
7825{
7826 struct bnx2x *bp = netdev_priv(dev);
7827
7828 return bp->msglevel;
7829}
7830
7831static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7832{
7833 struct bnx2x *bp = netdev_priv(dev);
7834
7835 if (capable(CAP_NET_ADMIN))
7836 bp->msglevel = level;
7837}
7838
7839static int bnx2x_nway_reset(struct net_device *dev)
7840{
7841 struct bnx2x *bp = netdev_priv(dev);
7842
34f80b04
EG
7843 if (!bp->port.pmf)
7844 return 0;
a2fbb9ea 7845
34f80b04 7846 if (netif_running(dev)) {
bb2a0f7a 7847 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
7848 bnx2x_link_set(bp);
7849 }
a2fbb9ea
ET
7850
7851 return 0;
7852}
7853
7854static int bnx2x_get_eeprom_len(struct net_device *dev)
7855{
7856 struct bnx2x *bp = netdev_priv(dev);
7857
34f80b04 7858 return bp->common.flash_size;
a2fbb9ea
ET
7859}
7860
7861static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7862{
34f80b04 7863 int port = BP_PORT(bp);
a2fbb9ea
ET
7864 int count, i;
7865 u32 val = 0;
7866
7867 /* adjust timeout for emulation/FPGA */
7868 count = NVRAM_TIMEOUT_COUNT;
7869 if (CHIP_REV_IS_SLOW(bp))
7870 count *= 100;
7871
7872 /* request access to nvram interface */
7873 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7874 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7875
7876 for (i = 0; i < count*10; i++) {
7877 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7878 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7879 break;
7880
7881 udelay(5);
7882 }
7883
7884 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7885 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
7886 return -EBUSY;
7887 }
7888
7889 return 0;
7890}
7891
7892static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7893{
34f80b04 7894 int port = BP_PORT(bp);
a2fbb9ea
ET
7895 int count, i;
7896 u32 val = 0;
7897
7898 /* adjust timeout for emulation/FPGA */
7899 count = NVRAM_TIMEOUT_COUNT;
7900 if (CHIP_REV_IS_SLOW(bp))
7901 count *= 100;
7902
7903 /* relinquish nvram interface */
7904 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7905 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7906
7907 for (i = 0; i < count*10; i++) {
7908 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7909 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7910 break;
7911
7912 udelay(5);
7913 }
7914
7915 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7916 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
7917 return -EBUSY;
7918 }
7919
7920 return 0;
7921}
7922
7923static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7924{
7925 u32 val;
7926
7927 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7928
7929 /* enable both bits, even on read */
7930 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7931 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7932 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7933}
7934
7935static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7936{
7937 u32 val;
7938
7939 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7940
7941 /* disable both bits, even after read */
7942 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7943 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7944 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7945}
7946
7947static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7948 u32 cmd_flags)
7949{
f1410647 7950 int count, i, rc;
a2fbb9ea
ET
7951 u32 val;
7952
7953 /* build the command word */
7954 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7955
7956 /* need to clear DONE bit separately */
7957 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7958
7959 /* address of the NVRAM to read from */
7960 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7961 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7962
7963 /* issue a read command */
7964 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7965
7966 /* adjust timeout for emulation/FPGA */
7967 count = NVRAM_TIMEOUT_COUNT;
7968 if (CHIP_REV_IS_SLOW(bp))
7969 count *= 100;
7970
7971 /* wait for completion */
7972 *ret_val = 0;
7973 rc = -EBUSY;
7974 for (i = 0; i < count; i++) {
7975 udelay(5);
7976 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7977
7978 if (val & MCPR_NVM_COMMAND_DONE) {
7979 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 7980 /* we read nvram data in cpu order,
 7981 * but ethtool sees it as an array of bytes;
 7982 * converting to big-endian does the work */
7983 val = cpu_to_be32(val);
7984 *ret_val = val;
7985 rc = 0;
7986 break;
7987 }
7988 }
7989
7990 return rc;
7991}
7992
7993static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7994 int buf_size)
7995{
7996 int rc;
7997 u32 cmd_flags;
7998 u32 val;
7999
8000 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8001 DP(BNX2X_MSG_NVM,
c14423fe 8002 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8003 offset, buf_size);
8004 return -EINVAL;
8005 }
8006
34f80b04
EG
8007 if (offset + buf_size > bp->common.flash_size) {
8008 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8009 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8010 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8011 return -EINVAL;
8012 }
8013
8014 /* request access to nvram interface */
8015 rc = bnx2x_acquire_nvram_lock(bp);
8016 if (rc)
8017 return rc;
8018
8019 /* enable access to nvram interface */
8020 bnx2x_enable_nvram_access(bp);
8021
8022 /* read the first word(s) */
8023 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8024 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8025 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8026 memcpy(ret_buf, &val, 4);
8027
8028 /* advance to the next dword */
8029 offset += sizeof(u32);
8030 ret_buf += sizeof(u32);
8031 buf_size -= sizeof(u32);
8032 cmd_flags = 0;
8033 }
8034
8035 if (rc == 0) {
8036 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8037 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8038 memcpy(ret_buf, &val, 4);
8039 }
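/* The NVRAM interface works in bursts: MCPR_NVM_COMMAND_FIRST opens the
 * sequence, intermediate dwords carry no flag, and the tail dword is
 * read outside the loop with MCPR_NVM_COMMAND_LAST to close it. */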
8040
8041 /* disable access to nvram interface */
8042 bnx2x_disable_nvram_access(bp);
8043 bnx2x_release_nvram_lock(bp);
8044
8045 return rc;
8046}
8047
8048static int bnx2x_get_eeprom(struct net_device *dev,
8049 struct ethtool_eeprom *eeprom, u8 *eebuf)
8050{
8051 struct bnx2x *bp = netdev_priv(dev);
8052 int rc;
8053
34f80b04 8054 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8055 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8056 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8057 eeprom->len, eeprom->len);
8058
8059 /* parameters already validated in ethtool_get_eeprom */
8060
8061 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8062
8063 return rc;
8064}
8065
8066static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8067 u32 cmd_flags)
8068{
f1410647 8069 int count, i, rc;
a2fbb9ea
ET
8070
8071 /* build the command word */
8072 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8073
8074 /* need to clear DONE bit separately */
8075 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8076
8077 /* write the data */
8078 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8079
8080 /* address of the NVRAM to write to */
8081 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8082 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8083
8084 /* issue the write command */
8085 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8086
8087 /* adjust timeout for emulation/FPGA */
8088 count = NVRAM_TIMEOUT_COUNT;
8089 if (CHIP_REV_IS_SLOW(bp))
8090 count *= 100;
8091
8092 /* wait for completion */
8093 rc = -EBUSY;
8094 for (i = 0; i < count; i++) {
8095 udelay(5);
8096 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8097 if (val & MCPR_NVM_COMMAND_DONE) {
8098 rc = 0;
8099 break;
8100 }
8101 }
8102
8103 return rc;
8104}
8105
f1410647 8106#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8107
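/* BYTE_OFFSET gives the bit position of a byte within its dword; e.g.
 * offset 0x13 is byte 3 of the aligned word, so the read-modify-write
 * in bnx2x_nvram_write1 below patches bits 31:24. */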
8108static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8109 int buf_size)
8110{
8111 int rc;
8112 u32 cmd_flags;
8113 u32 align_offset;
8114 u32 val;
8115
34f80b04
EG
8116 if (offset + buf_size > bp->common.flash_size) {
8117 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8118 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8119 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8120 return -EINVAL;
8121 }
8122
8123 /* request access to nvram interface */
8124 rc = bnx2x_acquire_nvram_lock(bp);
8125 if (rc)
8126 return rc;
8127
8128 /* enable access to nvram interface */
8129 bnx2x_enable_nvram_access(bp);
8130
8131 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8132 align_offset = (offset & ~0x03);
8133 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8134
8135 if (rc == 0) {
8136 val &= ~(0xff << BYTE_OFFSET(offset));
8137 val |= (*data_buf << BYTE_OFFSET(offset));
8138
8139 /* nvram data is returned as an array of bytes
8140 * convert it back to cpu order */
8141 val = be32_to_cpu(val);
8142
a2fbb9ea
ET
8143 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8144 cmd_flags);
8145 }
8146
8147 /* disable access to nvram interface */
8148 bnx2x_disable_nvram_access(bp);
8149 bnx2x_release_nvram_lock(bp);
8150
8151 return rc;
8152}
8153
8154static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8155 int buf_size)
8156{
8157 int rc;
8158 u32 cmd_flags;
8159 u32 val;
8160 u32 written_so_far;
8161
34f80b04 8162 if (buf_size == 1) /* ethtool */
a2fbb9ea 8163 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8164
8165 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8166 DP(BNX2X_MSG_NVM,
c14423fe 8167 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8168 offset, buf_size);
8169 return -EINVAL;
8170 }
8171
34f80b04
EG
8172 if (offset + buf_size > bp->common.flash_size) {
8173 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8174 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8175 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8176 return -EINVAL;
8177 }
8178
8179 /* request access to nvram interface */
8180 rc = bnx2x_acquire_nvram_lock(bp);
8181 if (rc)
8182 return rc;
8183
8184 /* enable access to nvram interface */
8185 bnx2x_enable_nvram_access(bp);
8186
8187 written_so_far = 0;
8188 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8189 while ((written_so_far < buf_size) && (rc == 0)) {
8190 if (written_so_far == (buf_size - sizeof(u32)))
8191 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8192 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8193 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8194 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8195 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8196
8197 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8198
8199 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8200
8201 /* advance to the next dword */
8202 offset += sizeof(u32);
8203 data_buf += sizeof(u32);
8204 written_so_far += sizeof(u32);
8205 cmd_flags = 0;
8206 }
8207
8208 /* disable access to nvram interface */
8209 bnx2x_disable_nvram_access(bp);
8210 bnx2x_release_nvram_lock(bp);
8211
8212 return rc;
8213}
8214
8215static int bnx2x_set_eeprom(struct net_device *dev,
8216 struct ethtool_eeprom *eeprom, u8 *eebuf)
8217{
8218 struct bnx2x *bp = netdev_priv(dev);
8219 int rc;
8220
34f80b04 8221 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8222 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8223 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8224 eeprom->len, eeprom->len);
8225
8226 /* parameters already validated in ethtool_set_eeprom */
8227
c18487ee 8228 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8229 if (eeprom->magic == 0x00504859)
8230 if (bp->port.pmf) {
8231
4a37fb66 8232 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8233 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8234 bp->link_params.ext_phy_config,
8235 (bp->state != BNX2X_STATE_CLOSED),
8236 eebuf, eeprom->len);
bb2a0f7a
YG
8237 if ((bp->state == BNX2X_STATE_OPEN) ||
8238 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8239 rc |= bnx2x_link_reset(&bp->link_params,
8240 &bp->link_vars);
8241 rc |= bnx2x_phy_init(&bp->link_params,
8242 &bp->link_vars);
bb2a0f7a 8243 }
4a37fb66 8244 bnx2x_release_phy_lock(bp);
34f80b04
EG
8245
8246 } else /* Only the PMF can access the PHY */
8247 return -EINVAL;
8248 else
c18487ee 8249 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8250
8251 return rc;
8252}
8253
8254static int bnx2x_get_coalesce(struct net_device *dev,
8255 struct ethtool_coalesce *coal)
8256{
8257 struct bnx2x *bp = netdev_priv(dev);
8258
8259 memset(coal, 0, sizeof(struct ethtool_coalesce));
8260
8261 coal->rx_coalesce_usecs = bp->rx_ticks;
8262 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8263
8264 return 0;
8265}
8266
8267static int bnx2x_set_coalesce(struct net_device *dev,
8268 struct ethtool_coalesce *coal)
8269{
8270 struct bnx2x *bp = netdev_priv(dev);
8271
8272 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8273 if (bp->rx_ticks > 3000)
8274 bp->rx_ticks = 3000;
8275
8276 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8277 if (bp->tx_ticks > 0x3000)
8278 bp->tx_ticks = 0x3000;
8279
34f80b04 8280 if (netif_running(dev))
a2fbb9ea
ET
8281 bnx2x_update_coalesce(bp);
8282
8283 return 0;
8284}
8285
8286static void bnx2x_get_ringparam(struct net_device *dev,
8287 struct ethtool_ringparam *ering)
8288{
8289 struct bnx2x *bp = netdev_priv(dev);
8290
8291 ering->rx_max_pending = MAX_RX_AVAIL;
8292 ering->rx_mini_max_pending = 0;
8293 ering->rx_jumbo_max_pending = 0;
8294
8295 ering->rx_pending = bp->rx_ring_size;
8296 ering->rx_mini_pending = 0;
8297 ering->rx_jumbo_pending = 0;
8298
8299 ering->tx_max_pending = MAX_TX_AVAIL;
8300 ering->tx_pending = bp->tx_ring_size;
8301}
8302
8303static int bnx2x_set_ringparam(struct net_device *dev,
8304 struct ethtool_ringparam *ering)
8305{
8306 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8307 int rc = 0;
a2fbb9ea
ET
8308
8309 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8310 (ering->tx_pending > MAX_TX_AVAIL) ||
8311 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8312 return -EINVAL;
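/* The tx ring must exceed MAX_SKB_FRAGS + 4 so that one worst-case
 * packet (all fragments plus a few BDs for headers) always fits; the
 * exact margin is inferred from the check above. */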
8313
8314 bp->rx_ring_size = ering->rx_pending;
8315 bp->tx_ring_size = ering->tx_pending;
8316
34f80b04
EG
8317 if (netif_running(dev)) {
8318 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8319 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8320 }
8321
34f80b04 8322 return rc;
a2fbb9ea
ET
8323}
8324
8325static void bnx2x_get_pauseparam(struct net_device *dev,
8326 struct ethtool_pauseparam *epause)
8327{
8328 struct bnx2x *bp = netdev_priv(dev);
8329
c18487ee
YR
8330 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8331 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8332
8333 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8334 FLOW_CTRL_RX);
8335 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8336 FLOW_CTRL_TX);
a2fbb9ea
ET
8337
8338 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8339 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8340 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8341}
8342
8343static int bnx2x_set_pauseparam(struct net_device *dev,
8344 struct ethtool_pauseparam *epause)
8345{
8346 struct bnx2x *bp = netdev_priv(dev);
8347
34f80b04
EG
8348 if (IS_E1HMF(bp))
8349 return 0;
8350
a2fbb9ea
ET
8351 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8352 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8353 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8354
c18487ee 8355 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 8356
f1410647 8357 if (epause->rx_pause)
c18487ee
YR
8358 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8359
f1410647 8360 if (epause->tx_pause)
c18487ee
YR
8361 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8362
8363 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8364 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
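/* With neither rx nor tx pause requested the setting collapses from
 * AUTO to NONE here; if autoneg is enabled (and the speed is also
 * auto-negotiated) it is restored to AUTO below so the link partner
 * can decide. */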
a2fbb9ea 8365
c18487ee 8366 if (epause->autoneg) {
34f80b04 8367 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8368 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8369 return -EINVAL;
8370 }
a2fbb9ea 8371
c18487ee
YR
8372 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8373 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8374 }
a2fbb9ea 8375
c18487ee
YR
8376 DP(NETIF_MSG_LINK,
8377 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8378
8379 if (netif_running(dev)) {
bb2a0f7a 8380 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8381 bnx2x_link_set(bp);
8382 }
a2fbb9ea
ET
8383
8384 return 0;
8385}
8386
df0f2343
VZ
8387static int bnx2x_set_flags(struct net_device *dev, u32 data)
8388{
8389 struct bnx2x *bp = netdev_priv(dev);
8390 int changed = 0;
8391 int rc = 0;
8392
8393 /* TPA requires Rx CSUM offloading */
8394 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8395 if (!(dev->features & NETIF_F_LRO)) {
8396 dev->features |= NETIF_F_LRO;
8397 bp->flags |= TPA_ENABLE_FLAG;
8398 changed = 1;
8399 }
8400
8401 } else if (dev->features & NETIF_F_LRO) {
8402 dev->features &= ~NETIF_F_LRO;
8403 bp->flags &= ~TPA_ENABLE_FLAG;
8404 changed = 1;
8405 }
8406
8407 if (changed && netif_running(dev)) {
8408 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8409 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8410 }
8411
8412 return rc;
8413}
8414
a2fbb9ea
ET
8415static u32 bnx2x_get_rx_csum(struct net_device *dev)
8416{
8417 struct bnx2x *bp = netdev_priv(dev);
8418
8419 return bp->rx_csum;
8420}
8421
8422static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8423{
8424 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8425 int rc = 0;
a2fbb9ea
ET
8426
8427 bp->rx_csum = data;
df0f2343
VZ
8428
 8429 /* Disable TPA when Rx CSUM is disabled; otherwise all
 8430 TPA'ed packets would be discarded due to a wrong TCP CSUM */
8431 if (!data) {
8432 u32 flags = ethtool_op_get_flags(dev);
8433
8434 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8435 }
8436
8437 return rc;
a2fbb9ea
ET
8438}
8439
8440static int bnx2x_set_tso(struct net_device *dev, u32 data)
8441{
755735eb 8442 if (data) {
a2fbb9ea 8443 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8444 dev->features |= NETIF_F_TSO6;
8445 } else {
a2fbb9ea 8446 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8447 dev->features &= ~NETIF_F_TSO6;
8448 }
8449
a2fbb9ea
ET
8450 return 0;
8451}
8452
f3c87cdd 8453static const struct {
a2fbb9ea
ET
8454 char string[ETH_GSTRING_LEN];
8455} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8456 { "register_test (offline)" },
8457 { "memory_test (offline)" },
8458 { "loopback_test (offline)" },
8459 { "nvram_test (online)" },
8460 { "interrupt_test (online)" },
8461 { "link_test (online)" },
8462 { "idle check (online)" },
8463 { "MC errors (online)" }
a2fbb9ea
ET
8464};
8465
8466static int bnx2x_self_test_count(struct net_device *dev)
8467{
8468 return BNX2X_NUM_TESTS;
8469}
8470
f3c87cdd
YG
8471static int bnx2x_test_registers(struct bnx2x *bp)
8472{
8473 int idx, i, rc = -ENODEV;
8474 u32 wr_val = 0;
9dabc424 8475 int port = BP_PORT(bp);
f3c87cdd
YG
8476 static const struct {
8477 u32 offset0;
8478 u32 offset1;
8479 u32 mask;
8480 } reg_tbl[] = {
8481/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8482 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8483 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8484 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8485 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8486 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8487 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8488 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8489 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8490 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8491/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8492 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8493 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8494 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8495 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8496 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8497 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8498 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8499 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8500 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8501/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8502 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8503 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8504 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8505 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8506 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8507 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8508 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8509 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8510 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8511/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8512 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8513 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8514 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8515 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8516 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8517 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8518 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8519
8520 { 0xffffffff, 0, 0x00000000 }
8521 };
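/* Table convention, as consumed by the loop below: offset1 is the
 * per-port stride (the register is read at offset0 + port*offset1) and
 * mask covers only the bits the register implements. */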
8522
8523 if (!netif_running(bp->dev))
8524 return rc;
8525
 8526 /* Run the test twice:
 8527 first writing 0x00000000, then writing 0xffffffff */
8528 for (idx = 0; idx < 2; idx++) {
8529
8530 switch (idx) {
8531 case 0:
8532 wr_val = 0;
8533 break;
8534 case 1:
8535 wr_val = 0xffffffff;
8536 break;
8537 }
8538
8539 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8540 u32 offset, mask, save_val, val;
f3c87cdd
YG
8541
8542 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8543 mask = reg_tbl[i].mask;
8544
8545 save_val = REG_RD(bp, offset);
8546
8547 REG_WR(bp, offset, wr_val);
8548 val = REG_RD(bp, offset);
8549
8550 /* Restore the original register's value */
8551 REG_WR(bp, offset, save_val);
8552
 8553 /* verify that the value is as expected */
8554 if ((val & mask) != (wr_val & mask))
8555 goto test_reg_exit;
8556 }
8557 }
8558
8559 rc = 0;
8560
8561test_reg_exit:
8562 return rc;
8563}
8564
8565static int bnx2x_test_memory(struct bnx2x *bp)
8566{
8567 int i, j, rc = -ENODEV;
8568 u32 val;
8569 static const struct {
8570 u32 offset;
8571 int size;
8572 } mem_tbl[] = {
8573 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8574 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8575 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8576 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8577 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8578 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8579 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8580
8581 { 0xffffffff, 0 }
8582 };
8583 static const struct {
8584 char *name;
8585 u32 offset;
9dabc424
YG
8586 u32 e1_mask;
8587 u32 e1h_mask;
f3c87cdd 8588 } prty_tbl[] = {
9dabc424
YG
8589 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8590 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8591 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8592 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8593 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8594 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8595
8596 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
8597 };
8598
8599 if (!netif_running(bp->dev))
8600 return rc;
8601
8602 /* Go through all the memories */
8603 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8604 for (j = 0; j < mem_tbl[i].size; j++)
8605 REG_RD(bp, mem_tbl[i].offset + j*4);
8606
8607 /* Check the parity status */
8608 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8609 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
8610 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8611 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
8612 DP(NETIF_MSG_HW,
8613 "%s is 0x%x\n", prty_tbl[i].name, val);
8614 goto test_mem_exit;
8615 }
8616 }
8617
8618 rc = 0;
8619
8620test_mem_exit:
8621 return rc;
8622}
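
/* The idea behind the test above: reading every word of a memory makes its
 * parity logic check each location, and any error is latched into that
 * block's PRTY_STS register. Bits set in e1_mask/e1h_mask are expected on
 * that chip revision and are masked off, so the failure condition reduces
 * to:
 *
 *	if (val & ~mask_for_this_chip)
 *		... genuine parity error ...
 */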
8623
8624static void bnx2x_netif_start(struct bnx2x *bp)
8625{
8626 int i;
8627
8628 if (atomic_dec_and_test(&bp->intr_sem)) {
8629 if (netif_running(bp->dev)) {
8630 bnx2x_int_enable(bp);
8631 for_each_queue(bp, i)
8632 napi_enable(&bnx2x_fp(bp, i, napi));
8633 if (bp->state == BNX2X_STATE_OPEN)
8634 netif_wake_queue(bp->dev);
8635 }
8636 }
8637}
8638
8639static void bnx2x_netif_stop(struct bnx2x *bp)
8640{
8641 int i;
8642
8643 if (netif_running(bp->dev)) {
8644 netif_tx_disable(bp->dev);
8645 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8646 for_each_queue(bp, i)
8647 napi_disable(&bnx2x_fp(bp, i, napi));
8648 }
8649 bnx2x_int_disable_sync(bp);
8650}
8651
8652static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8653{
8654 int cnt = 1000;
8655
8656 if (link_up)
8657 while (bnx2x_link_test(bp) && cnt--)
8658 msleep(10);
8659}
8660
8661static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8662{
8663 unsigned int pkt_size, num_pkts, i;
8664 struct sk_buff *skb;
8665 unsigned char *packet;
8666 struct bnx2x_fastpath *fp = &bp->fp[0];
8667 u16 tx_start_idx, tx_idx;
8668 u16 rx_start_idx, rx_idx;
8669 u16 pkt_prod;
8670 struct sw_tx_bd *tx_buf;
8671 struct eth_tx_bd *tx_bd;
8672 dma_addr_t mapping;
8673 union eth_rx_cqe *cqe;
8674 u8 cqe_fp_flags;
8675 struct sw_rx_bd *rx_buf;
8676 u16 len;
8677 int rc = -ENODEV;
8678
8679 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8680 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8681 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8682 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8683 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8684
8685 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8686 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8687 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8688 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8689 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
8690 /* wait until link state is restored */
8691 bnx2x_wait_for_link(bp, link_up);
8692
8693 } else
8694 return -EINVAL;
8695
8696 pkt_size = 1514;
8697 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8698 if (!skb) {
8699 rc = -ENOMEM;
8700 goto test_loopback_exit;
8701 }
8702 packet = skb_put(skb, pkt_size);
8703 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8704 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8705 for (i = ETH_HLEN; i < pkt_size; i++)
8706 packet[i] = (unsigned char) (i & 0xff);
8707
8708 num_pkts = 0;
8709 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8710 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8711
8712 pkt_prod = fp->tx_pkt_prod++;
8713 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8714 tx_buf->first_bd = fp->tx_bd_prod;
8715 tx_buf->skb = skb;
8716
8717 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8718 mapping = pci_map_single(bp->pdev, skb->data,
8719 skb_headlen(skb), PCI_DMA_TODEVICE);
8720 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8721 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8722 tx_bd->nbd = cpu_to_le16(1);
8723 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8724 tx_bd->vlan = cpu_to_le16(pkt_prod);
8725 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8726 ETH_TX_BD_FLAGS_END_BD);
8727 tx_bd->general_data = ((UNICAST_ADDRESS <<
8728 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8729
8730 fp->hw_tx_prods->bds_prod =
8731 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8732 mb(); /* FW restriction: must not reorder writing nbd and packets */
8733 fp->hw_tx_prods->packets_prod =
8734 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8735 DOORBELL(bp, FP_IDX(fp), 0);
8736
8737 mmiowb();
8738
8739 num_pkts++;
8740 fp->tx_bd_prod++;
8741 bp->dev->trans_start = jiffies;
8742
8743 udelay(100);
8744
8745 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8746 if (tx_idx != tx_start_idx + num_pkts)
8747 goto test_loopback_exit;
8748
8749 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8750 if (rx_idx != rx_start_idx + num_pkts)
8751 goto test_loopback_exit;
8752
8753 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8754 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8755 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8756 goto test_loopback_rx_exit;
8757
8758 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8759 if (len != pkt_size)
8760 goto test_loopback_rx_exit;
8761
8762 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8763 skb = rx_buf->skb;
8764 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8765 for (i = ETH_HLEN; i < pkt_size; i++)
8766 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8767 goto test_loopback_rx_exit;
8768
8769 rc = 0;
8770
8771test_loopback_rx_exit:
8772 bp->dev->last_rx = jiffies;
8773
8774 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8775 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8776 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8777 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8778
8779 /* Update producers */
8780 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8781 fp->rx_sge_prod);
8782 mmiowb(); /* keep prod updates ordered */
8783
8784test_loopback_exit:
8785 bp->link_params.loopback_mode = LOOPBACK_NONE;
8786
8787 return rc;
8788}
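
/* The loopback run above is self-checking: one frame whose payload is a
 * recognizable pattern (byte i == i & 0xff) is sent on queue 0 while the
 * MAC or XGXS is looped back, then the TX and RX consumer indices are
 * required to advance by exactly num_pkts and the received length and
 * payload are compared byte by byte. Any mismatch leaves rc = -ENODEV.
 */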
8789
8790static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8791{
8792 int rc = 0;
8793
8794 if (!netif_running(bp->dev))
8795 return BNX2X_LOOPBACK_FAILED;
8796
8797 bnx2x_netif_stop(bp);
8798
8799 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8800 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8801 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8802 }
8803
8804 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8805 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8806 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8807 }
8808
8809 bnx2x_netif_start(bp);
8810
8811 return rc;
8812}
8813
8814#define CRC32_RESIDUAL 0xdebb20e3
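
/* Why a fixed residual works: each nvram_tbl[] section below carries its
 * own little-endian CRC32 (assumed to trail the data). A standard CRC32
 * property is that summing a buffer together with its correct trailing CRC
 * always yields the constant residue 0xdebb20e3, so a section can be
 * validated without parsing its internal layout:
 *
 *	if (ether_crc_le(section_size, data) != CRC32_RESIDUAL)
 *		... section is corrupt ...
 */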
8815
8816static int bnx2x_test_nvram(struct bnx2x *bp)
8817{
8818 static const struct {
8819 int offset;
8820 int size;
8821 } nvram_tbl[] = {
8822 { 0, 0x14 }, /* bootstrap */
8823 { 0x14, 0xec }, /* dir */
8824 { 0x100, 0x350 }, /* manuf_info */
8825 { 0x450, 0xf0 }, /* feature_info */
8826 { 0x640, 0x64 }, /* upgrade_key_info */
8827 { 0x6a4, 0x64 },
8828 { 0x708, 0x70 }, /* manuf_key_info */
8829 { 0x778, 0x70 },
8830 { 0, 0 }
8831 };
8832 u32 buf[0x350 / 4];
8833 u8 *data = (u8 *)buf;
8834 int i, rc;
8835 u32 magic, csum;
8836
8837 rc = bnx2x_nvram_read(bp, 0, data, 4);
8838 if (rc) {
8839 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8840 goto test_nvram_exit;
8841 }
8842
8843 magic = be32_to_cpu(buf[0]);
8844 if (magic != 0x669955aa) {
8845 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8846 rc = -ENODEV;
8847 goto test_nvram_exit;
8848 }
8849
8850 for (i = 0; nvram_tbl[i].size; i++) {
8851
8852 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8853 nvram_tbl[i].size);
8854 if (rc) {
8855 DP(NETIF_MSG_PROBE,
8856 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8857 goto test_nvram_exit;
8858 }
8859
8860 csum = ether_crc_le(nvram_tbl[i].size, data);
8861 if (csum != CRC32_RESIDUAL) {
8862 DP(NETIF_MSG_PROBE,
8863 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8864 rc = -ENODEV;
8865 goto test_nvram_exit;
8866 }
8867 }
8868
8869test_nvram_exit:
8870 return rc;
8871}
8872
8873static int bnx2x_test_intr(struct bnx2x *bp)
8874{
8875 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8876 int i, rc;
8877
8878 if (!netif_running(bp->dev))
8879 return -ENODEV;
8880
8881 config->hdr.length_6b = 0;
8882 config->hdr.offset = 0;
8883 config->hdr.client_id = BP_CL_ID(bp);
8884 config->hdr.reserved1 = 0;
8885
8886 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8887 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8888 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8889 if (rc == 0) {
8890 bp->set_mac_pending++;
8891 for (i = 0; i < 10; i++) {
8892 if (!bp->set_mac_pending)
8893 break;
8894 msleep_interruptible(10);
8895 }
8896 if (i == 10)
8897 rc = -ENODEV;
8898 }
8899
8900 return rc;
8901}
8902
a2fbb9ea
ET
8903static void bnx2x_self_test(struct net_device *dev,
8904 struct ethtool_test *etest, u64 *buf)
8905{
8906 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
8907
8908 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8909
f3c87cdd 8910 if (!netif_running(dev))
a2fbb9ea 8911 return;
a2fbb9ea 8912
33471629 8913 /* offline tests are not supported in MF mode */
f3c87cdd
YG
8914 if (IS_E1HMF(bp))
8915 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8916
8917 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8918 u8 link_up;
8919
8920 link_up = bp->link_vars.link_up;
8921 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8922 bnx2x_nic_load(bp, LOAD_DIAG);
8923 /* wait until link state is restored */
8924 bnx2x_wait_for_link(bp, link_up);
8925
8926 if (bnx2x_test_registers(bp) != 0) {
8927 buf[0] = 1;
8928 etest->flags |= ETH_TEST_FL_FAILED;
8929 }
8930 if (bnx2x_test_memory(bp) != 0) {
8931 buf[1] = 1;
8932 etest->flags |= ETH_TEST_FL_FAILED;
8933 }
8934 buf[2] = bnx2x_test_loopback(bp, link_up);
8935 if (buf[2] != 0)
8936 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 8937
f3c87cdd
YG
8938 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8939 bnx2x_nic_load(bp, LOAD_NORMAL);
8940 /* wait until link state is restored */
8941 bnx2x_wait_for_link(bp, link_up);
8942 }
8943 if (bnx2x_test_nvram(bp) != 0) {
8944 buf[3] = 1;
a2fbb9ea
ET
8945 etest->flags |= ETH_TEST_FL_FAILED;
8946 }
f3c87cdd
YG
8947 if (bnx2x_test_intr(bp) != 0) {
8948 buf[4] = 1;
8949 etest->flags |= ETH_TEST_FL_FAILED;
8950 }
8951 if (bp->port.pmf)
8952 if (bnx2x_link_test(bp) != 0) {
8953 buf[5] = 1;
8954 etest->flags |= ETH_TEST_FL_FAILED;
8955 }
8956 buf[7] = bnx2x_mc_assert(bp);
8957 if (buf[7] != 0)
8958 etest->flags |= ETH_TEST_FL_FAILED;
8959
8960#ifdef BNX2X_EXTRA_DEBUG
8961 bnx2x_panic_dump(bp);
8962#endif
a2fbb9ea
ET
8963}
8964
bb2a0f7a
YG
8965static const struct {
8966 long offset;
8967 int size;
8968 u32 flags;
66e855f3
YG
8969#define STATS_FLAGS_PORT 1
8970#define STATS_FLAGS_FUNC 2
8971 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 8972} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
66e855f3
YG
8973/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8974 8, STATS_FLAGS_FUNC, "rx_bytes" },
8975 { STATS_OFFSET32(error_bytes_received_hi),
8976 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8977 { STATS_OFFSET32(total_bytes_transmitted_hi),
8978 8, STATS_FLAGS_FUNC, "tx_bytes" },
8979 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8980 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 8981 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 8982 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 8983 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 8984 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 8985 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 8986 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 8987 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 8988 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 8989 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 8990 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 8991/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 8992 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 8993 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 8994 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 8995 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 8996 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 8997 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 8998 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 8999 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9000 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9001 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9002 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9003 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9004 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9005 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9006 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9007 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9008 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9009 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
66e855f3
YG
9010 8, STATS_FLAGS_PORT, "rx_fragments" },
9011/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9012 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9013 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9014 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9015 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9016 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9017 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9018 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9019 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9020 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9021 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9022 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9023 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9024 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9025 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9026 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9027 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9028 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9029 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9030 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9031/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9032 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9033 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
66e855f3
YG
9034 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9035 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9036 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9037 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9038 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9039 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
66e855f3
YG
9040 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9041 { STATS_OFFSET32(mac_filter_discard),
9042 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9043 { STATS_OFFSET32(no_buff_discard),
9044 4, STATS_FLAGS_FUNC, "rx_discards" },
9045 { STATS_OFFSET32(xxoverflow_discard),
9046 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9047 { STATS_OFFSET32(brb_drop_hi),
9048 8, STATS_FLAGS_PORT, "brb_discard" },
9049 { STATS_OFFSET32(brb_truncate_hi),
9050 8, STATS_FLAGS_PORT, "brb_truncate" },
9051/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9052 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9053 { STATS_OFFSET32(rx_skb_alloc_failed),
9054 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9055/* 42 */{ STATS_OFFSET32(hw_csum_err),
9056 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
a2fbb9ea
ET
9057};
9058
66e855f3
YG
9059#define IS_NOT_E1HMF_STAT(bp, i) \
9060 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9061
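/* In E1H multi-function mode the per-port MAC statistics cannot be
 * attributed to a single function, so entries flagged STATS_FLAGS_PORT are
 * skipped. get_strings(), get_stats_count() and get_ethtool_stats() below
 * must all apply this same filter so that names, count and values stay
 * index-aligned.
 */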
a2fbb9ea
ET
9062static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9063{
bb2a0f7a
YG
9064 struct bnx2x *bp = netdev_priv(dev);
9065 int i, j;
9066
a2fbb9ea
ET
9067 switch (stringset) {
9068 case ETH_SS_STATS:
bb2a0f7a 9069 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9070 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9071 continue;
9072 strcpy(buf + j*ETH_GSTRING_LEN,
9073 bnx2x_stats_arr[i].string);
9074 j++;
9075 }
a2fbb9ea
ET
9076 break;
9077
9078 case ETH_SS_TEST:
9079 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9080 break;
9081 }
9082}
9083
9084static int bnx2x_get_stats_count(struct net_device *dev)
9085{
bb2a0f7a
YG
9086 struct bnx2x *bp = netdev_priv(dev);
9087 int i, num_stats = 0;
9088
9089 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9090 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9091 continue;
9092 num_stats++;
9093 }
9094 return num_stats;
a2fbb9ea
ET
9095}
9096
9097static void bnx2x_get_ethtool_stats(struct net_device *dev,
9098 struct ethtool_stats *stats, u64 *buf)
9099{
9100 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
9101 u32 *hw_stats = (u32 *)&bp->eth_stats;
9102 int i, j;
a2fbb9ea 9103
bb2a0f7a 9104 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9105 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9106 continue;
bb2a0f7a
YG
9107
9108 if (bnx2x_stats_arr[i].size == 0) {
9109 /* skip this counter */
9110 buf[j] = 0;
9111 j++;
a2fbb9ea
ET
9112 continue;
9113 }
bb2a0f7a 9114 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9115 /* 4-byte counter */
bb2a0f7a
YG
9116 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9117 j++;
a2fbb9ea
ET
9118 continue;
9119 }
9120 /* 8-byte counter */
bb2a0f7a
YG
9121 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9122 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9123 j++;
a2fbb9ea
ET
9124 }
9125}
9126
9127static int bnx2x_phys_id(struct net_device *dev, u32 data)
9128{
9129 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9130 int port = BP_PORT(bp);
a2fbb9ea
ET
9131 int i;
9132
34f80b04
EG
9133 if (!netif_running(dev))
9134 return 0;
9135
9136 if (!bp->port.pmf)
9137 return 0;
9138
a2fbb9ea
ET
9139 if (data == 0)
9140 data = 2;
9141
9142 for (i = 0; i < (data * 2); i++) {
c18487ee 9143 if ((i % 2) == 0)
34f80b04 9144 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9145 bp->link_params.hw_led_mode,
9146 bp->link_params.chip_id);
9147 else
34f80b04 9148 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9149 bp->link_params.hw_led_mode,
9150 bp->link_params.chip_id);
9151
a2fbb9ea
ET
9152 msleep_interruptible(500);
9153 if (signal_pending(current))
9154 break;
9155 }
9156
c18487ee 9157 if (bp->link_vars.link_up)
34f80b04 9158 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9159 bp->link_vars.line_speed,
9160 bp->link_params.hw_led_mode,
9161 bp->link_params.chip_id);
a2fbb9ea
ET
9162
9163 return 0;
9164}
9165
9166static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9167 .get_settings = bnx2x_get_settings,
9168 .set_settings = bnx2x_set_settings,
9169 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9170 .get_wol = bnx2x_get_wol,
9171 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9172 .get_msglevel = bnx2x_get_msglevel,
9173 .set_msglevel = bnx2x_set_msglevel,
9174 .nway_reset = bnx2x_nway_reset,
9175 .get_link = ethtool_op_get_link,
9176 .get_eeprom_len = bnx2x_get_eeprom_len,
9177 .get_eeprom = bnx2x_get_eeprom,
9178 .set_eeprom = bnx2x_set_eeprom,
9179 .get_coalesce = bnx2x_get_coalesce,
9180 .set_coalesce = bnx2x_set_coalesce,
9181 .get_ringparam = bnx2x_get_ringparam,
9182 .set_ringparam = bnx2x_set_ringparam,
9183 .get_pauseparam = bnx2x_get_pauseparam,
9184 .set_pauseparam = bnx2x_set_pauseparam,
9185 .get_rx_csum = bnx2x_get_rx_csum,
9186 .set_rx_csum = bnx2x_set_rx_csum,
9187 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9188 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9189 .set_flags = bnx2x_set_flags,
9190 .get_flags = ethtool_op_get_flags,
9191 .get_sg = ethtool_op_get_sg,
9192 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9193 .get_tso = ethtool_op_get_tso,
9194 .set_tso = bnx2x_set_tso,
9195 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9196 .self_test = bnx2x_self_test,
9197 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9198 .phys_id = bnx2x_phys_id,
9199 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9200 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9201};
9202
9203/* end of ethtool_ops */
9204
9205/****************************************************************************
9206* General service functions
9207****************************************************************************/
9208
9209static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9210{
9211 u16 pmcsr;
9212
9213 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9214
9215 switch (state) {
9216 case PCI_D0:
34f80b04 9217 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9218 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9219 PCI_PM_CTRL_PME_STATUS));
9220
9221 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9222 /* delay required during transition out of D3hot */
a2fbb9ea 9223 msleep(20);
34f80b04 9224 break;
a2fbb9ea 9225
34f80b04
EG
9226 case PCI_D3hot:
9227 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9228 pmcsr |= 3;
a2fbb9ea 9229
34f80b04
EG
9230 if (bp->wol)
9231 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9232
34f80b04
EG
9233 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9234 pmcsr);
a2fbb9ea 9235
34f80b04
EG
9236 /* No more memory access after this point until
9237 * device is brought back to D0.
9238 */
9239 break;
9240
9241 default:
9242 return -EINVAL;
9243 }
9244 return 0;
a2fbb9ea
ET
9245}
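
/* Background for the PMCSR manipulation above (per the PCI PM spec): the
 * low two bits of the PCI_PM_CTRL word select the power state (0 = D0,
 * 3 = D3hot), which is why entering D3hot is "pmcsr |= 3", and the PME
 * status bit is write-one-to-clear. A sketch of the D0 path:
 *
 *	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	(select D0)
 *	pmcsr |= PCI_PM_CTRL_PME_STATUS;	(clear stale PME)
 *	pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pmcsr);
 */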
9246
34f80b04
EG
9247/*
9248 * net_device service functions
9249 */
9250
a2fbb9ea
ET
9251static int bnx2x_poll(struct napi_struct *napi, int budget)
9252{
9253 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9254 napi);
9255 struct bnx2x *bp = fp->bp;
9256 int work_done = 0;
2772f903 9257 u16 rx_cons_sb;
a2fbb9ea
ET
9258
9259#ifdef BNX2X_STOP_ON_ERROR
9260 if (unlikely(bp->panic))
34f80b04 9261 goto poll_panic;
a2fbb9ea
ET
9262#endif
9263
9264 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9265 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9266 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9267
9268 bnx2x_update_fpsb_idx(fp);
9269
da5a662a 9270 if (BNX2X_HAS_TX_WORK(fp))
a2fbb9ea
ET
9271 bnx2x_tx_int(fp, budget);
9272
2772f903
EG
9273 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9274 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9275 rx_cons_sb++;
da5a662a 9276 if (BNX2X_HAS_RX_WORK(fp))
a2fbb9ea
ET
9277 work_done = bnx2x_rx_int(fp, budget);
9278
da5a662a 9279 rmb(); /* BNX2X_HAS_WORK() reads the status block */
2772f903
EG
9280 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9281 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9282 rx_cons_sb++;
a2fbb9ea
ET
9283
9284 /* must not complete if we consumed full budget */
da5a662a 9285 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
9286
9287#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9288poll_panic:
a2fbb9ea
ET
9289#endif
9290 netif_rx_complete(bp->dev, napi);
9291
34f80b04 9292 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9293 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9294 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9295 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9296 }
a2fbb9ea
ET
9297 return work_done;
9298}
9299
755735eb
EG
9300
9301/* we split the first BD into a header BD and a data BD
33471629 9302 * to ease the pain of our fellow microcode engineers;
755735eb
EG
9303 * we use one mapping for both BDs.
9304 * So far this has only been observed to happen
9305 * in Other Operating Systems(TM).
9306 */
9307static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9308 struct bnx2x_fastpath *fp,
9309 struct eth_tx_bd **tx_bd, u16 hlen,
9310 u16 bd_prod, int nbd)
9311{
9312 struct eth_tx_bd *h_tx_bd = *tx_bd;
9313 struct eth_tx_bd *d_tx_bd;
9314 dma_addr_t mapping;
9315 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9316
9317 /* first fix first BD */
9318 h_tx_bd->nbd = cpu_to_le16(nbd);
9319 h_tx_bd->nbytes = cpu_to_le16(hlen);
9320
9321 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9322 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9323 h_tx_bd->addr_lo, h_tx_bd->nbd);
9324
9325 /* now get a new data BD
9326 * (after the pbd) and fill it */
9327 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9328 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9329
9330 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9331 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9332
9333 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9334 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9335 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9336 d_tx_bd->vlan = 0;
9337 /* this marks the BD as one that has no individual mapping
9338 * the FW ignores this flag in a BD not marked start
9339 */
9340 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9341 DP(NETIF_MSG_TX_QUEUED,
9342 "TSO split data size is %d (%x:%x)\n",
9343 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9344
9345 /* update tx_bd for marking the last BD flag */
9346 *tx_bd = d_tx_bd;
9347
9348 return bd_prod;
9349}
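
/* Worked example of the split above: a TSO skb whose linear part is 1000
 * bytes, of which hlen = 66 are headers. The original first BD covered all
 * 1000 bytes; after the split the header BD keeps bytes 0..65 and the new
 * data BD describes the remaining 934 bytes at "mapping + hlen" - the same
 * DMA mapping is reused, which is why no extra pci_map_single() is needed.
 */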
9350
9351static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9352{
9353 if (fix > 0)
9354 csum = (u16) ~csum_fold(csum_sub(csum,
9355 csum_partial(t_header - fix, fix, 0)));
9356
9357 else if (fix < 0)
9358 csum = (u16) ~csum_fold(csum_add(csum,
9359 csum_partial(t_header, -fix, 0)));
9360
9361 return swab16(csum);
9362}
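
/* What the arithmetic above achieves: the caller has a folded checksum
 * over a region that starts 'fix' bytes before (fix > 0) or after
 * (fix < 0) the transport header. csum_partial() over the gap bytes is
 * subtracted from or added to the sum so that it covers exactly the region
 * starting at the transport header, and swab16() puts the result in the
 * byte order the parsing BD expects. E.g. for fix = 2, the two bytes just
 * before t_header are summed and removed.
 */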
9363
9364static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9365{
9366 u32 rc;
9367
9368 if (skb->ip_summed != CHECKSUM_PARTIAL)
9369 rc = XMIT_PLAIN;
9370
9371 else {
9372 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9373 rc = XMIT_CSUM_V6;
9374 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9375 rc |= XMIT_CSUM_TCP;
9376
9377 } else {
9378 rc = XMIT_CSUM_V4;
9379 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9380 rc |= XMIT_CSUM_TCP;
9381 }
9382 }
9383
9384 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9385 rc |= XMIT_GSO_V4;
9386
9387 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9388 rc |= XMIT_GSO_V6;
9389
9390 return rc;
9391}
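
/* Example of the flag combinations returned: a CHECKSUM_PARTIAL IPv4 TCP
 * frame with GSO enabled yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, telling bnx2x_start_xmit()
 * in one pass to allocate a parsing BD, program the pseudo checksum and
 * set the LSO fields.
 */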
9392
9393/* check if packet requires linearization (packet is too fragmented) */
9394static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9395 u32 xmit_type)
9396{
9397 int to_copy = 0;
9398 int hlen = 0;
9399 int first_bd_sz = 0;
9400
9401 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9402 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9403
9404 if (xmit_type & XMIT_GSO) {
9405 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9406 /* Check if LSO packet needs to be copied:
9407 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9408 int wnd_size = MAX_FETCH_BD - 3;
33471629 9409 /* Number of windows to check */
755735eb
EG
9410 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9411 int wnd_idx = 0;
9412 int frag_idx = 0;
9413 u32 wnd_sum = 0;
9414
9415 /* Headers length */
9416 hlen = (int)(skb_transport_header(skb) - skb->data) +
9417 tcp_hdrlen(skb);
9418
9419			/* Amount of data (w/o headers) on the linear part of the SKB */
9420 first_bd_sz = skb_headlen(skb) - hlen;
9421
9422 wnd_sum = first_bd_sz;
9423
9424 /* Calculate the first sum - it's special */
9425 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9426 wnd_sum +=
9427 skb_shinfo(skb)->frags[frag_idx].size;
9428
9429 /* If there was data on linear skb data - check it */
9430 if (first_bd_sz > 0) {
9431 if (unlikely(wnd_sum < lso_mss)) {
9432 to_copy = 1;
9433 goto exit_lbl;
9434 }
9435
9436 wnd_sum -= first_bd_sz;
9437 }
9438
9439 /* Others are easier: run through the frag list and
9440 check all windows */
9441 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9442 wnd_sum +=
9443 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9444
9445 if (unlikely(wnd_sum < lso_mss)) {
9446 to_copy = 1;
9447 break;
9448 }
9449 wnd_sum -=
9450 skb_shinfo(skb)->frags[wnd_idx].size;
9451 }
9452
9453 } else {
9454			/* a non-LSO packet that is too fragmented should always
9455			   be linearized */
9456 to_copy = 1;
9457 }
9458 }
9459
9460exit_lbl:
9461 if (unlikely(to_copy))
9462 DP(NETIF_MSG_TX_QUEUED,
9463 "Linearization IS REQUIRED for %s packet. "
9464 "num_frags %d hlen %d first_bd_sz %d\n",
9465 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9466 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9467
9468 return to_copy;
9469}
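
/* Worked picture of the window check above: the FW can fetch at most
 * MAX_FETCH_BD BDs per packet, of which 3 are reserved, so every
 * wnd_size = MAX_FETCH_BD - 3 consecutive data BDs must together carry at
 * least lso_mss bytes. The loop slides that window across the frag list,
 * adding the frag that enters and subtracting the one that leaves; a
 * single window below lso_mss forces skb_linearize().
 */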
9470
9471/* called with netif_tx_lock
a2fbb9ea 9472 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9473 * netif_wake_queue()
a2fbb9ea
ET
9474 */
9475static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9476{
9477 struct bnx2x *bp = netdev_priv(dev);
9478 struct bnx2x_fastpath *fp;
9479 struct sw_tx_bd *tx_buf;
9480 struct eth_tx_bd *tx_bd;
9481 struct eth_tx_parse_bd *pbd = NULL;
9482 u16 pkt_prod, bd_prod;
755735eb 9483 int nbd, fp_index;
a2fbb9ea 9484 dma_addr_t mapping;
755735eb
EG
9485 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9486 int vlan_off = (bp->e1hov ? 4 : 0);
9487 int i;
9488 u8 hlen = 0;
a2fbb9ea
ET
9489
9490#ifdef BNX2X_STOP_ON_ERROR
9491 if (unlikely(bp->panic))
9492 return NETDEV_TX_BUSY;
9493#endif
9494
755735eb 9495 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9496 fp = &bp->fp[fp_index];
755735eb 9497
a2fbb9ea
ET
9498	if (unlikely(bnx2x_tx_avail(fp) <
9499 (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9500		bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9501 netif_stop_queue(dev);
9502 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9503 return NETDEV_TX_BUSY;
9504 }
9505
755735eb
EG
9506 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9507 " gso type %x xmit_type %x\n",
9508 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9509 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9510
33471629 9511 /* First, check if we need to linearize the skb
755735eb
EG
9512 (due to FW restrictions) */
9513 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9514 /* Statistics of linearization */
9515 bp->lin_cnt++;
9516 if (skb_linearize(skb) != 0) {
9517 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9518 "silently dropping this SKB\n");
9519 dev_kfree_skb_any(skb);
da5a662a 9520 return NETDEV_TX_OK;
755735eb
EG
9521 }
9522 }
9523
a2fbb9ea 9524 /*
755735eb 9525 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9526 then for TSO or xsum we have a parsing info BD,
755735eb 9527 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9528 (don't forget to mark the last one as last,
9529 and to unmap only AFTER you write to the BD ...)
755735eb 9530	   And above all, all PBD sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9531 */
9532
9533 pkt_prod = fp->tx_pkt_prod++;
755735eb 9534 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9535
755735eb 9536 /* get a tx_buf and first BD */
a2fbb9ea
ET
9537 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9538 tx_bd = &fp->tx_desc_ring[bd_prod];
9539
9540 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9541 tx_bd->general_data = (UNICAST_ADDRESS <<
9542 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
9543 /* header nbd */
9544 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9545
755735eb
EG
9546 /* remember the first BD of the packet */
9547 tx_buf->first_bd = fp->tx_bd_prod;
9548 tx_buf->skb = skb;
a2fbb9ea
ET
9549
9550 DP(NETIF_MSG_TX_QUEUED,
9551 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9552 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9553
755735eb
EG
9554 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9555 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9556 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9557 vlan_off += 4;
9558 } else
9559 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9560
755735eb 9561 if (xmit_type) {
a2fbb9ea 9562
755735eb 9563 /* turn on parsing and get a BD */
a2fbb9ea
ET
9564 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9565 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9566
9567 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9568 }
9569
9570 if (xmit_type & XMIT_CSUM) {
9571 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9572
9573 /* for now NS flag is not used in Linux */
755735eb 9574 pbd->global_data = (hlen |
96fc1784 9575 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9576 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9577
755735eb
EG
9578 pbd->ip_hlen = (skb_transport_header(skb) -
9579 skb_network_header(skb)) / 2;
9580
9581 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9582
755735eb
EG
9583 pbd->total_hlen = cpu_to_le16(hlen);
9584 hlen = hlen*2 - vlan_off;
a2fbb9ea 9585
755735eb
EG
9586 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9587
9588 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9589 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9590 ETH_TX_BD_FLAGS_IP_CSUM;
9591 else
9592 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9593
9594 if (xmit_type & XMIT_CSUM_TCP) {
9595 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9596
9597 } else {
9598 s8 fix = SKB_CS_OFF(skb); /* signed! */
9599
a2fbb9ea 9600 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9601 pbd->cs_offset = fix / 2;
a2fbb9ea 9602
755735eb
EG
9603 DP(NETIF_MSG_TX_QUEUED,
9604 "hlen %d offset %d fix %d csum before fix %x\n",
9605 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9606 SKB_CS(skb));
9607
9608 /* HW bug: fixup the CSUM */
9609 pbd->tcp_pseudo_csum =
9610 bnx2x_csum_fix(skb_transport_header(skb),
9611 SKB_CS(skb), fix);
9612
9613 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9614 pbd->tcp_pseudo_csum);
9615 }
a2fbb9ea
ET
9616 }
9617
9618 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9619 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9620
9621 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9622 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9623 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
9624 tx_bd->nbd = cpu_to_le16(nbd);
9625 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9626
9627 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9628 " nbytes %d flags %x vlan %x\n",
9629 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9630 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9631 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9632
755735eb 9633 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9634
9635 DP(NETIF_MSG_TX_QUEUED,
9636 "TSO packet len %d hlen %d total len %d tso size %d\n",
9637 skb->len, hlen, skb_headlen(skb),
9638 skb_shinfo(skb)->gso_size);
9639
9640 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9641
755735eb
EG
9642 if (unlikely(skb_headlen(skb) > hlen))
9643 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9644 bd_prod, ++nbd);
a2fbb9ea
ET
9645
9646 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9647 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9648 pbd->tcp_flags = pbd_tcp_flags(skb);
9649
9650 if (xmit_type & XMIT_GSO_V4) {
9651 pbd->ip_id = swab16(ip_hdr(skb)->id);
9652 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9653 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9654 ip_hdr(skb)->daddr,
9655 0, IPPROTO_TCP, 0));
755735eb
EG
9656
9657 } else
9658 pbd->tcp_pseudo_csum =
9659 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9660 &ipv6_hdr(skb)->daddr,
9661 0, IPPROTO_TCP, 0));
9662
a2fbb9ea
ET
9663 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9664 }
9665
755735eb
EG
9666 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9667 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9668
755735eb
EG
9669 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9670 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9671
755735eb
EG
9672 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9673 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9674
755735eb
EG
9675 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9676 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9677 tx_bd->nbytes = cpu_to_le16(frag->size);
9678 tx_bd->vlan = cpu_to_le16(pkt_prod);
9679 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9680
755735eb
EG
9681 DP(NETIF_MSG_TX_QUEUED,
9682 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9683 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9684 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9685 }
9686
755735eb 9687 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9688 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9689
9690 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9691 tx_bd, tx_bd->bd_flags.as_bitfield);
9692
a2fbb9ea
ET
9693 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9694
755735eb 9695 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
9696 * if the packet contains or ends with it
9697 */
9698 if (TX_BD_POFF(bd_prod) < nbd)
9699 nbd++;
9700
9701 if (pbd)
9702 DP(NETIF_MSG_TX_QUEUED,
9703 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9704 " tcp_flags %x xsum %x seq %u hlen %u\n",
9705 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9706 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9707 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9708
755735eb 9709 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9710
96fc1784
ET
9711 fp->hw_tx_prods->bds_prod =
9712 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9713 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9714 fp->hw_tx_prods->packets_prod =
9715 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9716 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9717
9718 mmiowb();
9719
755735eb 9720 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9721 dev->trans_start = jiffies;
9722
9723 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9724 netif_stop_queue(dev);
bb2a0f7a 9725 bp->eth_stats.driver_xoff++;
a2fbb9ea
ET
9726 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9727 netif_wake_queue(dev);
9728 }
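	/* Note on the stop/re-check sequence above: the queue is stopped
	 * first and availability is tested again afterwards. If a TX
	 * completion freed descriptors between the two checks, the queue is
	 * woken immediately; without the second check the queue could stall
	 * with room available and nothing left to wake it.
	 */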
9729 fp->tx_pkt++;
9730
9731 return NETDEV_TX_OK;
9732}
9733
bb2a0f7a 9734/* called with rtnl_lock */
a2fbb9ea
ET
9735static int bnx2x_open(struct net_device *dev)
9736{
9737 struct bnx2x *bp = netdev_priv(dev);
9738
9739 bnx2x_set_power_state(bp, PCI_D0);
9740
bb2a0f7a 9741 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
9742}
9743
bb2a0f7a 9744/* called with rtnl_lock */
a2fbb9ea
ET
9745static int bnx2x_close(struct net_device *dev)
9746{
a2fbb9ea
ET
9747 struct bnx2x *bp = netdev_priv(dev);
9748
9749 /* Unload the driver, release IRQs */
bb2a0f7a
YG
9750 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9751 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9752 if (!CHIP_REV_IS_SLOW(bp))
9753 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
9754
9755 return 0;
9756}
9757
34f80b04
EG
9758/* called with netif_tx_lock from set_multicast */
9759static void bnx2x_set_rx_mode(struct net_device *dev)
9760{
9761 struct bnx2x *bp = netdev_priv(dev);
9762 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9763 int port = BP_PORT(bp);
9764
9765 if (bp->state != BNX2X_STATE_OPEN) {
9766 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9767 return;
9768 }
9769
9770 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9771
9772 if (dev->flags & IFF_PROMISC)
9773 rx_mode = BNX2X_RX_MODE_PROMISC;
9774
9775 else if ((dev->flags & IFF_ALLMULTI) ||
9776 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9777 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9778
9779 else { /* some multicasts */
9780 if (CHIP_IS_E1(bp)) {
9781 int i, old, offset;
9782 struct dev_mc_list *mclist;
9783 struct mac_configuration_cmd *config =
9784 bnx2x_sp(bp, mcast_config);
9785
9786 for (i = 0, mclist = dev->mc_list;
9787 mclist && (i < dev->mc_count);
9788 i++, mclist = mclist->next) {
9789
9790 config->config_table[i].
9791 cam_entry.msb_mac_addr =
9792 swab16(*(u16 *)&mclist->dmi_addr[0]);
9793 config->config_table[i].
9794 cam_entry.middle_mac_addr =
9795 swab16(*(u16 *)&mclist->dmi_addr[2]);
9796 config->config_table[i].
9797 cam_entry.lsb_mac_addr =
9798 swab16(*(u16 *)&mclist->dmi_addr[4]);
9799 config->config_table[i].cam_entry.flags =
9800 cpu_to_le16(port);
9801 config->config_table[i].
9802 target_table_entry.flags = 0;
9803 config->config_table[i].
9804 target_table_entry.client_id = 0;
9805 config->config_table[i].
9806 target_table_entry.vlan_id = 0;
9807
9808 DP(NETIF_MSG_IFUP,
9809 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9810 config->config_table[i].
9811 cam_entry.msb_mac_addr,
9812 config->config_table[i].
9813 cam_entry.middle_mac_addr,
9814 config->config_table[i].
9815 cam_entry.lsb_mac_addr);
9816 }
9817 old = config->hdr.length_6b;
9818 if (old > i) {
9819 for (; i < old; i++) {
9820 if (CAM_IS_INVALID(config->
9821 config_table[i])) {
9822 i--; /* already invalidated */
9823 break;
9824 }
9825 /* invalidate */
9826 CAM_INVALIDATE(config->
9827 config_table[i]);
9828 }
9829 }
9830
9831 if (CHIP_REV_IS_SLOW(bp))
9832 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9833 else
9834 offset = BNX2X_MAX_MULTICAST*(1 + port);
9835
9836 config->hdr.length_6b = i;
9837 config->hdr.offset = offset;
9838 config->hdr.client_id = BP_CL_ID(bp);
9839 config->hdr.reserved1 = 0;
9840
9841 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9842 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9843 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9844 0);
9845 } else { /* E1H */
9846 /* Accept one or more multicasts */
9847 struct dev_mc_list *mclist;
9848 u32 mc_filter[MC_HASH_SIZE];
9849 u32 crc, bit, regidx;
9850 int i;
9851
9852 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9853
9854 for (i = 0, mclist = dev->mc_list;
9855 mclist && (i < dev->mc_count);
9856 i++, mclist = mclist->next) {
9857
9858 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9859 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9860 mclist->dmi_addr[0], mclist->dmi_addr[1],
9861 mclist->dmi_addr[2], mclist->dmi_addr[3],
9862 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9863
9864 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9865 bit = (crc >> 24) & 0xff;
9866 regidx = bit >> 5;
9867 bit &= 0x1f;
9868 mc_filter[regidx] |= (1 << bit);
9869 }
9870
9871 for (i = 0; i < MC_HASH_SIZE; i++)
9872 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9873 mc_filter[i]);
9874 }
9875 }
9876
9877 bp->rx_mode = rx_mode;
9878 bnx2x_set_storm_rx_mode(bp);
9879}
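
/* Worked example of the E1H multicast hash above: crc32c_le() is taken
 * over the 6-byte MAC and its top byte selects one of 256 bins. For bin
 * 0x9c: regidx = 0x9c >> 5 = 4 and bit = 0x9c & 0x1f = 28, so
 * mc_filter[4] |= (1 << 28). Eight 32-bit MC_HASH registers cover all
 * bins; the filter is imperfect, and false positives are dropped higher
 * up the stack.
 */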
9880
9881/* called with rtnl_lock */
a2fbb9ea
ET
9882static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9883{
9884 struct sockaddr *addr = p;
9885 struct bnx2x *bp = netdev_priv(dev);
9886
34f80b04 9887 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
9888 return -EINVAL;
9889
9890 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
9891 if (netif_running(dev)) {
9892 if (CHIP_IS_E1(bp))
3101c2bc 9893 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9894 else
3101c2bc 9895 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9896 }
a2fbb9ea
ET
9897
9898 return 0;
9899}
9900
c18487ee 9901/* called with rtnl_lock */
a2fbb9ea
ET
9902static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9903{
9904 struct mii_ioctl_data *data = if_mii(ifr);
9905 struct bnx2x *bp = netdev_priv(dev);
3196a88a 9906 int port = BP_PORT(bp);
a2fbb9ea
ET
9907 int err;
9908
9909 switch (cmd) {
9910 case SIOCGMIIPHY:
34f80b04 9911 data->phy_id = bp->port.phy_addr;
a2fbb9ea 9912
c14423fe 9913 /* fallthrough */
c18487ee 9914
a2fbb9ea 9915 case SIOCGMIIREG: {
c18487ee 9916 u16 mii_regval;
a2fbb9ea 9917
c18487ee
YR
9918 if (!netif_running(dev))
9919 return -EAGAIN;
a2fbb9ea 9920
34f80b04 9921 mutex_lock(&bp->port.phy_mutex);
3196a88a 9922 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
9923 DEFAULT_PHY_DEV_ADDR,
9924 (data->reg_num & 0x1f), &mii_regval);
9925 data->val_out = mii_regval;
34f80b04 9926 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9927 return err;
9928 }
9929
9930 case SIOCSMIIREG:
9931 if (!capable(CAP_NET_ADMIN))
9932 return -EPERM;
9933
c18487ee
YR
9934 if (!netif_running(dev))
9935 return -EAGAIN;
9936
34f80b04 9937 mutex_lock(&bp->port.phy_mutex);
3196a88a 9938 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
9939 DEFAULT_PHY_DEV_ADDR,
9940 (data->reg_num & 0x1f), data->val_in);
34f80b04 9941 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
9942 return err;
9943
9944 default:
9945 /* do nothing */
9946 break;
9947 }
9948
9949 return -EOPNOTSUPP;
9950}
9951
34f80b04 9952/* called with rtnl_lock */
a2fbb9ea
ET
9953static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9954{
9955 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9956 int rc = 0;
a2fbb9ea
ET
9957
9958 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9959 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9960 return -EINVAL;
9961
9962 /* This does not race with packet allocation
c14423fe 9963 * because the actual alloc size is
a2fbb9ea
ET
9964 * only updated as part of load
9965 */
9966 dev->mtu = new_mtu;
9967
9968 if (netif_running(dev)) {
34f80b04
EG
9969 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9970 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 9971 }
34f80b04
EG
9972
9973 return rc;
a2fbb9ea
ET
9974}
9975
9976static void bnx2x_tx_timeout(struct net_device *dev)
9977{
9978 struct bnx2x *bp = netdev_priv(dev);
9979
9980#ifdef BNX2X_STOP_ON_ERROR
9981 if (!bp->panic)
9982 bnx2x_panic();
9983#endif
9984	/* This allows the netif to be shut down gracefully before resetting */
9985 schedule_work(&bp->reset_task);
9986}
9987
9988#ifdef BCM_VLAN
34f80b04 9989/* called with rtnl_lock */
a2fbb9ea
ET
9990static void bnx2x_vlan_rx_register(struct net_device *dev,
9991 struct vlan_group *vlgrp)
9992{
9993 struct bnx2x *bp = netdev_priv(dev);
9994
9995 bp->vlgrp = vlgrp;
9996 if (netif_running(dev))
49d66772 9997 bnx2x_set_client_config(bp);
a2fbb9ea 9998}
34f80b04 9999
a2fbb9ea
ET
10000#endif
10001
10002#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10003static void poll_bnx2x(struct net_device *dev)
10004{
10005 struct bnx2x *bp = netdev_priv(dev);
10006
10007 disable_irq(bp->pdev->irq);
10008 bnx2x_interrupt(bp->pdev->irq, dev);
10009 enable_irq(bp->pdev->irq);
10010}
10011#endif
10012
34f80b04
EG
10013static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10014 struct net_device *dev)
a2fbb9ea
ET
10015{
10016 struct bnx2x *bp;
10017 int rc;
10018
10019 SET_NETDEV_DEV(dev, &pdev->dev);
10020 bp = netdev_priv(dev);
10021
34f80b04
EG
10022 bp->dev = dev;
10023 bp->pdev = pdev;
a2fbb9ea 10024 bp->flags = 0;
34f80b04 10025 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10026
10027 rc = pci_enable_device(pdev);
10028 if (rc) {
10029 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10030 goto err_out;
10031 }
10032
10033 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10034 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10035 " aborting\n");
10036 rc = -ENODEV;
10037 goto err_out_disable;
10038 }
10039
10040 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10041 printk(KERN_ERR PFX "Cannot find second PCI device"
10042 " base address, aborting\n");
10043 rc = -ENODEV;
10044 goto err_out_disable;
10045 }
10046
34f80b04
EG
10047 if (atomic_read(&pdev->enable_cnt) == 1) {
10048 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10049 if (rc) {
10050 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10051 " aborting\n");
10052 goto err_out_disable;
10053 }
a2fbb9ea 10054
34f80b04
EG
10055 pci_set_master(pdev);
10056 pci_save_state(pdev);
10057 }
a2fbb9ea
ET
10058
10059 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10060 if (bp->pm_cap == 0) {
10061 printk(KERN_ERR PFX "Cannot find power management"
10062 " capability, aborting\n");
10063 rc = -EIO;
10064 goto err_out_release;
10065 }
10066
10067 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10068 if (bp->pcie_cap == 0) {
10069 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10070 " aborting\n");
10071 rc = -EIO;
10072 goto err_out_release;
10073 }
10074
10075 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10076 bp->flags |= USING_DAC_FLAG;
10077 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10078 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10079 " failed, aborting\n");
10080 rc = -EIO;
10081 goto err_out_release;
10082 }
10083
10084 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10085 printk(KERN_ERR PFX "System does not support DMA,"
10086 " aborting\n");
10087 rc = -EIO;
10088 goto err_out_release;
10089 }
10090
34f80b04
EG
10091 dev->mem_start = pci_resource_start(pdev, 0);
10092 dev->base_addr = dev->mem_start;
10093 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10094
10095 dev->irq = pdev->irq;
10096
10097 bp->regview = ioremap_nocache(dev->base_addr,
10098 pci_resource_len(pdev, 0));
10099 if (!bp->regview) {
10100 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10101 rc = -ENOMEM;
10102 goto err_out_release;
10103 }
10104
34f80b04
EG
10105 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10106 min_t(u64, BNX2X_DB_SIZE,
10107 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10108 if (!bp->doorbells) {
10109 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10110 rc = -ENOMEM;
10111 goto err_out_unmap;
10112 }
10113
10114 bnx2x_set_power_state(bp, PCI_D0);
10115
34f80b04
EG
10116 /* clean indirect addresses */
10117 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10118 PCICFG_VENDOR_ID_OFFSET);
10119 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10120 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10121 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10122 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10123
34f80b04
EG
10124 dev->hard_start_xmit = bnx2x_start_xmit;
10125 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10126
34f80b04
EG
10127 dev->ethtool_ops = &bnx2x_ethtool_ops;
10128 dev->open = bnx2x_open;
10129 dev->stop = bnx2x_close;
10130 dev->set_multicast_list = bnx2x_set_rx_mode;
10131 dev->set_mac_address = bnx2x_change_mac_addr;
10132 dev->do_ioctl = bnx2x_ioctl;
10133 dev->change_mtu = bnx2x_change_mtu;
10134 dev->tx_timeout = bnx2x_tx_timeout;
10135#ifdef BCM_VLAN
10136 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10137#endif
10138#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10139 dev->poll_controller = poll_bnx2x;
10140#endif
10141 dev->features |= NETIF_F_SG;
10142 dev->features |= NETIF_F_HW_CSUM;
10143 if (bp->flags & USING_DAC_FLAG)
10144 dev->features |= NETIF_F_HIGHDMA;
10145#ifdef BCM_VLAN
10146 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10147#endif
10148 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10149 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10150
10151 return 0;
10152
10153err_out_unmap:
10154 if (bp->regview) {
10155 iounmap(bp->regview);
10156 bp->regview = NULL;
10157 }
a2fbb9ea
ET
10158 if (bp->doorbells) {
10159 iounmap(bp->doorbells);
10160 bp->doorbells = NULL;
10161 }
10162
10163err_out_release:
34f80b04
EG
10164 if (atomic_read(&pdev->enable_cnt) == 1)
10165 pci_release_regions(pdev);
a2fbb9ea
ET
10166
10167err_out_disable:
10168 pci_disable_device(pdev);
10169 pci_set_drvdata(pdev, NULL);
10170
10171err_out:
10172 return rc;
10173}
10174
25047950
ET
10175static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10176{
10177 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10178
10179 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10180 return val;
10181}
10182
10183/* return value of 1=2.5GHz 2=5GHz */
10184static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10185{
10186 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10187
10188 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10189 return val;
10190}
10191
a2fbb9ea
ET
10192static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10193 const struct pci_device_id *ent)
10194{
10195 static int version_printed;
10196 struct net_device *dev = NULL;
10197 struct bnx2x *bp;
25047950 10198 int rc;
25047950 10199 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
10200
10201 if (version_printed++ == 0)
10202 printk(KERN_INFO "%s", version);
10203
10204 /* dev zeroed in init_etherdev */
10205 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10206 if (!dev) {
10207 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10208 return -ENOMEM;
34f80b04 10209 }
a2fbb9ea
ET
10210
10211 netif_carrier_off(dev);
10212
10213 bp = netdev_priv(dev);
10214 bp->msglevel = debug;
10215
34f80b04 10216 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10217 if (rc < 0) {
10218 free_netdev(dev);
10219 return rc;
10220 }
10221
a2fbb9ea
ET
10222 rc = register_netdev(dev);
10223 if (rc) {
c14423fe 10224 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10225 goto init_one_exit;
a2fbb9ea
ET
10226 }
10227
10228 pci_set_drvdata(pdev, dev);
10229
34f80b04
EG
10230 rc = bnx2x_init_bp(bp);
10231 if (rc) {
10232 unregister_netdev(dev);
10233 goto init_one_exit;
10234 }
10235
10236 bp->common.name = board_info[ent->driver_data].name;
25047950 10237 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10238 " IRQ %d, ", dev->name, bp->common.name,
10239 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10240 bnx2x_get_pcie_width(bp),
10241 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10242 dev->base_addr, bp->pdev->irq);
10243 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10244 return 0;
34f80b04
EG
10245
10246init_one_exit:
10247 if (bp->regview)
10248 iounmap(bp->regview);
10249
10250 if (bp->doorbells)
10251 iounmap(bp->doorbells);
10252
10253 free_netdev(dev);
10254
10255 if (atomic_read(&pdev->enable_cnt) == 1)
10256 pci_release_regions(pdev);
10257
10258 pci_disable_device(pdev);
10259 pci_set_drvdata(pdev, NULL);
10260
10261 return rc;
a2fbb9ea
ET
10262}
10263
10264static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10265{
10266 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10267 struct bnx2x *bp;
10268
10269 if (!dev) {
228241eb
ET
10270 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10271 return;
10272 }
228241eb 10273 bp = netdev_priv(dev);
a2fbb9ea 10274
a2fbb9ea
ET
10275 unregister_netdev(dev);
10276
10277 if (bp->regview)
10278 iounmap(bp->regview);
10279
10280 if (bp->doorbells)
10281 iounmap(bp->doorbells);
10282
10283 free_netdev(dev);
34f80b04
EG
10284
10285 if (atomic_read(&pdev->enable_cnt) == 1)
10286 pci_release_regions(pdev);
10287
a2fbb9ea
ET
10288 pci_disable_device(pdev);
10289 pci_set_drvdata(pdev, NULL);
10290}
10291
10292static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10293{
10294 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10295 struct bnx2x *bp;
10296
34f80b04
EG
10297 if (!dev) {
10298 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10299 return -ENODEV;
10300 }
10301 bp = netdev_priv(dev);
a2fbb9ea 10302
34f80b04 10303 rtnl_lock();
a2fbb9ea 10304
34f80b04 10305 pci_save_state(pdev);
228241eb 10306
34f80b04
EG
10307 if (!netif_running(dev)) {
10308 rtnl_unlock();
10309 return 0;
10310 }
a2fbb9ea
ET
10311
10312 netif_device_detach(dev);
a2fbb9ea 10313
da5a662a 10314 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10315
a2fbb9ea 10316 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10317
34f80b04
EG
10318 rtnl_unlock();
10319
a2fbb9ea
ET
10320 return 0;
10321}
10322
10323static int bnx2x_resume(struct pci_dev *pdev)
10324{
10325 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10326 struct bnx2x *bp;
a2fbb9ea
ET
10327 int rc;
10328
228241eb
ET
10329 if (!dev) {
10330 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10331 return -ENODEV;
10332 }
228241eb 10333 bp = netdev_priv(dev);
a2fbb9ea 10334
34f80b04
EG
10335 rtnl_lock();
10336
228241eb 10337 pci_restore_state(pdev);
34f80b04
EG
10338
10339 if (!netif_running(dev)) {
10340 rtnl_unlock();
10341 return 0;
10342 }
10343
a2fbb9ea
ET
10344 bnx2x_set_power_state(bp, PCI_D0);
10345 netif_device_attach(dev);
10346
da5a662a 10347 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10348
34f80b04
EG
10349 rtnl_unlock();
10350
10351 return rc;
a2fbb9ea
ET
10352}
10353
493adb1f
WX
10354/**
10355 * bnx2x_io_error_detected - called when PCI error is detected
10356 * @pdev: Pointer to PCI device
10357 * @state: The current pci connection state
10358 *
10359 * This function is called after a PCI bus error affecting
10360 * this device has been detected.
10361 */
10362static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10363 pci_channel_state_t state)
10364{
10365 struct net_device *dev = pci_get_drvdata(pdev);
10366 struct bnx2x *bp = netdev_priv(dev);
10367
10368 rtnl_lock();
10369
10370 netif_device_detach(dev);
10371
10372 if (netif_running(dev))
10373 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10374
10375 pci_disable_device(pdev);
10376
10377 rtnl_unlock();
10378
10379 /* Request a slot reset */
10380 return PCI_ERS_RESULT_NEED_RESET;
10381}
10382
10383/**
10384 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10385 * @pdev: Pointer to PCI device
10386 *
10387 * Restart the card from scratch, as if from a cold-boot.
10388 */
10389static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10390{
10391 struct net_device *dev = pci_get_drvdata(pdev);
10392 struct bnx2x *bp = netdev_priv(dev);
10393
10394 rtnl_lock();
10395
10396 if (pci_enable_device(pdev)) {
10397 dev_err(&pdev->dev,
10398 "Cannot re-enable PCI device after reset\n");
10399 rtnl_unlock();
10400 return PCI_ERS_RESULT_DISCONNECT;
10401 }
10402
10403 pci_set_master(pdev);
10404 pci_restore_state(pdev);
10405
10406 if (netif_running(dev))
10407 bnx2x_set_power_state(bp, PCI_D0);
10408
10409 rtnl_unlock();
10410
10411 return PCI_ERS_RESULT_RECOVERED;
10412}
10413
10414/**
10415 * bnx2x_io_resume - called when traffic can start flowing again
10416 * @pdev: Pointer to PCI device
10417 *
10418 * This callback is called when the error recovery driver tells us that
10419 * its OK to resume normal operation.
10420 */
10421static void bnx2x_io_resume(struct pci_dev *pdev)
10422{
10423 struct net_device *dev = pci_get_drvdata(pdev);
10424 struct bnx2x *bp = netdev_priv(dev);
10425
10426 rtnl_lock();
10427
10428 if (netif_running(dev))
10429 bnx2x_nic_load(bp, LOAD_OPEN);
10430
10431 netif_device_attach(dev);
10432
10433 rtnl_unlock();
10434}
10435
10436static struct pci_error_handlers bnx2x_err_handler = {
10437 .error_detected = bnx2x_io_error_detected,
10438 .slot_reset = bnx2x_io_slot_reset,
10439 .resume = bnx2x_io_resume,
10440};
10441
a2fbb9ea 10442static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10443 .name = DRV_MODULE_NAME,
10444 .id_table = bnx2x_pci_tbl,
10445 .probe = bnx2x_init_one,
10446 .remove = __devexit_p(bnx2x_remove_one),
10447 .suspend = bnx2x_suspend,
10448 .resume = bnx2x_resume,
10449 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10450};
10451
10452static int __init bnx2x_init(void)
10453{
10454 return pci_register_driver(&bnx2x_pci_driver);
10455}
10456
10457static void __exit bnx2x_cleanup(void)
10458{
10459 pci_unregister_driver(&bnx2x_pci_driver);
10460}
10461
10462module_init(bnx2x_init);
10463module_exit(bnx2x_cleanup);
10464