]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
bnx2x: Rework power state handling code
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
3359fced 3 * Copyright (c) 2007-2010 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea 51#include <linux/io.h>
45229b42 52#include <linux/stringify.h>
a2fbb9ea 53
359d8b15 54
a2fbb9ea
ET
55#include "bnx2x.h"
56#include "bnx2x_init.h"
94a78b79 57#include "bnx2x_init_ops.h"
0a64ea57 58#include "bnx2x_dump.h"
a2fbb9ea 59
4fd89b7a
DK
60#define DRV_MODULE_VERSION "1.52.1-8"
61#define DRV_MODULE_RELDATE "2010/04/01"
34f80b04 62#define BNX2X_BC_VER 0x040200
a2fbb9ea 63
94a78b79
VZ
64#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
45229b42
BH
67#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
94a78b79 74
34f80b04
EG
75/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
a2fbb9ea 77
53a10565 78static char version[] __devinitdata =
34f80b04 79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
24e3fcef 82MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 83MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
84MODULE_LICENSE("GPL");
85MODULE_VERSION(DRV_MODULE_VERSION);
45229b42
BH
86MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
a2fbb9ea 88
555f6c78
EG
89static int multi_mode = 1;
90module_param(multi_mode, int, 0);
ca00392c
EG
91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
93
54b9ddaa
VZ
94static int num_queues;
95module_param(num_queues, int, 0);
96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
555f6c78 98
19680c48 99static int disable_tpa;
19680c48 100module_param(disable_tpa, int, 0);
9898f86d 101MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
102
103static int int_mode;
104module_param(int_mode, int, 0);
cdaa7cb8
VZ
105MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
106 "(1 INT#x; 2 MSI)");
8badd27a 107
a18f5128
EG
108static int dropless_fc;
109module_param(dropless_fc, int, 0);
110MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111
9898f86d 112static int poll;
a2fbb9ea 113module_param(poll, int, 0);
9898f86d 114MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
115
116static int mrrs = -1;
117module_param(mrrs, int, 0);
118MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119
9898f86d 120static int debug;
a2fbb9ea 121module_param(debug, int, 0);
9898f86d
EG
122MODULE_PARM_DESC(debug, " Default debug msglevel");
123
124static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 125
1cf167f2 126static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
127
128enum bnx2x_board_type {
129 BCM57710 = 0,
34f80b04
EG
130 BCM57711 = 1,
131 BCM57711E = 2,
a2fbb9ea
ET
132};
133
34f80b04 134/* indexed by board_type, above */
53a10565 135static struct {
a2fbb9ea
ET
136 char *name;
137} board_info[] __devinitdata = {
34f80b04
EG
138 { "Broadcom NetXtreme II BCM57710 XGb" },
139 { "Broadcom NetXtreme II BCM57711 XGb" },
140 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
141};
142
34f80b04 143
a3aa1884 144static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
e4ed7113
EG
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
a2fbb9ea
ET
148 { 0 }
149};
150
151MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152
153/****************************************************************************
154* General service functions
155****************************************************************************/
156
157/* used only at init
158 * locking is done by mcp
159 */
573f2035 160void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
161{
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165 PCICFG_VENDOR_ID_OFFSET);
166}
167
a2fbb9ea
ET
168static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
169{
170 u32 val;
171
172 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175 PCICFG_VENDOR_ID_OFFSET);
176
177 return val;
178}
a2fbb9ea
ET
179
180static const u32 dmae_reg_go_c[] = {
181 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
185};
186
187/* copy command into DMAE command memory and set DMAE command go */
188static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
189 int idx)
190{
191 u32 cmd_offset;
192 int i;
193
194 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
197
ad8d3948
EG
198 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
200 }
201 REG_WR(bp, dmae_reg_go_c[idx], 1);
202}
203
ad8d3948
EG
204void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
205 u32 len32)
a2fbb9ea 206{
5ff7b6d4 207 struct dmae_command dmae;
a2fbb9ea 208 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
209 int cnt = 200;
210
211 if (!bp->dmae_ready) {
212 u32 *data = bnx2x_sp(bp, wb_data[0]);
213
214 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
215 " using indirect\n", dst_addr, len32);
216 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
217 return;
218 }
219
5ff7b6d4 220 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 221
5ff7b6d4
EG
222 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 225#ifdef __BIG_ENDIAN
5ff7b6d4 226 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 227#else
5ff7b6d4 228 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 229#endif
5ff7b6d4
EG
230 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232 dmae.src_addr_lo = U64_LO(dma_addr);
233 dmae.src_addr_hi = U64_HI(dma_addr);
234 dmae.dst_addr_lo = dst_addr >> 2;
235 dmae.dst_addr_hi = 0;
236 dmae.len = len32;
237 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 240
c3eefaf6 241 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
242 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
243 "dst_addr [%x:%08x (%08x)]\n"
244 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
245 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
ad8d3948 248 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
249 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 251
5ff7b6d4
EG
252 mutex_lock(&bp->dmae_mutex);
253
a2fbb9ea
ET
254 *wb_comp = 0;
255
5ff7b6d4 256 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
257
258 udelay(5);
ad8d3948
EG
259
260 while (*wb_comp != DMAE_COMP_VAL) {
261 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
262
ad8d3948 263 if (!cnt) {
c3eefaf6 264 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
265 break;
266 }
ad8d3948 267 cnt--;
12469401
YG
268 /* adjust delay for emulation/FPGA */
269 if (CHIP_REV_IS_SLOW(bp))
270 msleep(100);
271 else
272 udelay(5);
a2fbb9ea 273 }
ad8d3948
EG
274
275 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
276}
277
c18487ee 278void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 279{
5ff7b6d4 280 struct dmae_command dmae;
a2fbb9ea 281 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
282 int cnt = 200;
283
284 if (!bp->dmae_ready) {
285 u32 *data = bnx2x_sp(bp, wb_data[0]);
286 int i;
287
288 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
289 " using indirect\n", src_addr, len32);
290 for (i = 0; i < len32; i++)
291 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
292 return;
293 }
294
5ff7b6d4 295 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 296
5ff7b6d4
EG
297 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 300#ifdef __BIG_ENDIAN
5ff7b6d4 301 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 302#else
5ff7b6d4 303 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 304#endif
5ff7b6d4
EG
305 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307 dmae.src_addr_lo = src_addr >> 2;
308 dmae.src_addr_hi = 0;
309 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
311 dmae.len = len32;
312 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 315
c3eefaf6 316 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
317 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
318 "dst_addr [%x:%08x (%08x)]\n"
319 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
320 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
a2fbb9ea 323
5ff7b6d4
EG
324 mutex_lock(&bp->dmae_mutex);
325
326 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
a2fbb9ea
ET
327 *wb_comp = 0;
328
5ff7b6d4 329 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
330
331 udelay(5);
ad8d3948
EG
332
333 while (*wb_comp != DMAE_COMP_VAL) {
334
ad8d3948 335 if (!cnt) {
c3eefaf6 336 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
337 break;
338 }
ad8d3948 339 cnt--;
12469401
YG
340 /* adjust delay for emulation/FPGA */
341 if (CHIP_REV_IS_SLOW(bp))
342 msleep(100);
343 else
344 udelay(5);
a2fbb9ea 345 }
ad8d3948 346 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
347 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
349
350 mutex_unlock(&bp->dmae_mutex);
351}
352
573f2035
EG
353void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 u32 addr, u32 len)
355{
02e3c6cb 356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
573f2035
EG
357 int offset = 0;
358
02e3c6cb 359 while (len > dmae_wr_max) {
573f2035 360 bnx2x_write_dmae(bp, phys_addr + offset,
02e3c6cb
VZ
361 addr + offset, dmae_wr_max);
362 offset += dmae_wr_max * 4;
363 len -= dmae_wr_max;
573f2035
EG
364 }
365
366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
367}
368
ad8d3948
EG
369/* used only for slowpath so not inlined */
370static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
371{
372 u32 wb_write[2];
373
374 wb_write[0] = val_hi;
375 wb_write[1] = val_lo;
376 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 377}
a2fbb9ea 378
ad8d3948
EG
379#ifdef USE_WB_RD
380static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
381{
382 u32 wb_data[2];
383
384 REG_RD_DMAE(bp, reg, wb_data, 2);
385
386 return HILO_U64(wb_data[0], wb_data[1]);
387}
388#endif
389
a2fbb9ea
ET
390static int bnx2x_mc_assert(struct bnx2x *bp)
391{
a2fbb9ea 392 char last_idx;
34f80b04
EG
393 int i, rc = 0;
394 u32 row0, row1, row2, row3;
395
396 /* XSTORM */
397 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398 XSTORM_ASSERT_LIST_INDEX_OFFSET);
399 if (last_idx)
400 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
401
402 /* print the asserts */
403 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
404
405 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i));
407 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
413
414 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416 " 0x%08x 0x%08x 0x%08x\n",
417 i, row3, row2, row1, row0);
418 rc++;
419 } else {
420 break;
421 }
422 }
423
424 /* TSTORM */
425 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426 TSTORM_ASSERT_LIST_INDEX_OFFSET);
427 if (last_idx)
428 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
429
430 /* print the asserts */
431 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
432
433 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i));
435 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
441
442 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444 " 0x%08x 0x%08x 0x%08x\n",
445 i, row3, row2, row1, row0);
446 rc++;
447 } else {
448 break;
449 }
450 }
451
452 /* CSTORM */
453 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454 CSTORM_ASSERT_LIST_INDEX_OFFSET);
455 if (last_idx)
456 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
457
458 /* print the asserts */
459 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
460
461 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i));
463 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
469
470 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472 " 0x%08x 0x%08x 0x%08x\n",
473 i, row3, row2, row1, row0);
474 rc++;
475 } else {
476 break;
477 }
478 }
479
480 /* USTORM */
481 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482 USTORM_ASSERT_LIST_INDEX_OFFSET);
483 if (last_idx)
484 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
485
486 /* print the asserts */
487 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
488
489 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i));
491 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 4);
493 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 8);
495 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496 USTORM_ASSERT_LIST_OFFSET(i) + 12);
497
498 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500 " 0x%08x 0x%08x 0x%08x\n",
501 i, row3, row2, row1, row0);
502 rc++;
503 } else {
504 break;
a2fbb9ea
ET
505 }
506 }
34f80b04 507
a2fbb9ea
ET
508 return rc;
509}
c14423fe 510
a2fbb9ea
ET
511static void bnx2x_fw_dump(struct bnx2x *bp)
512{
cdaa7cb8 513 u32 addr;
a2fbb9ea 514 u32 mark, offset;
4781bfad 515 __be32 data[9];
a2fbb9ea
ET
516 int word;
517
2145a920
VZ
518 if (BP_NOMCP(bp)) {
519 BNX2X_ERR("NO MCP - can not dump\n");
520 return;
521 }
cdaa7cb8
VZ
522
523 addr = bp->common.shmem_base - 0x0800 + 4;
524 mark = REG_RD(bp, addr);
525 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
7995c64e 526 pr_err("begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 527
7995c64e 528 pr_err("");
cdaa7cb8 529 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
a2fbb9ea 530 for (word = 0; word < 8; word++)
cdaa7cb8 531 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 532 data[8] = 0x0;
7995c64e 533 pr_cont("%s", (char *)data);
a2fbb9ea 534 }
cdaa7cb8 535 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
a2fbb9ea 536 for (word = 0; word < 8; word++)
cdaa7cb8 537 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 538 data[8] = 0x0;
7995c64e 539 pr_cont("%s", (char *)data);
a2fbb9ea 540 }
7995c64e 541 pr_err("end of fw dump\n");
a2fbb9ea
ET
542}
543
544static void bnx2x_panic_dump(struct bnx2x *bp)
545{
546 int i;
547 u16 j, start, end;
548
66e855f3
YG
549 bp->stats_state = STATS_STATE_DISABLED;
550 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
551
a2fbb9ea
ET
552 BNX2X_ERR("begin crash dump -----------------\n");
553
8440d2b6
EG
554 /* Indices */
555 /* Common */
cdaa7cb8
VZ
556 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
557 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
558 " spq_prod_idx(0x%x)\n",
8440d2b6
EG
559 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
561
562 /* Rx */
54b9ddaa 563 for_each_queue(bp, i) {
a2fbb9ea 564 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 565
cdaa7cb8
VZ
566 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
567 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
568 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
8440d2b6 569 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
570 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
cdaa7cb8
VZ
572 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
573 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
8440d2b6
EG
574 fp->rx_sge_prod, fp->last_max_sge,
575 le16_to_cpu(fp->fp_u_idx),
576 fp->status_blk->u_status_block.status_block_index);
577 }
a2fbb9ea 578
8440d2b6 579 /* Tx */
54b9ddaa 580 for_each_queue(bp, i) {
8440d2b6 581 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 582
cdaa7cb8
VZ
583 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
584 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
585 " *tx_cons_sb(0x%x)\n",
8440d2b6
EG
586 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
cdaa7cb8
VZ
588 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
589 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 590 fp->status_blk->c_status_block.status_block_index,
ca00392c 591 fp->tx_db.data.prod);
8440d2b6 592 }
a2fbb9ea 593
8440d2b6
EG
594 /* Rings */
595 /* Rx */
54b9ddaa 596 for_each_queue(bp, i) {
8440d2b6 597 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
598
599 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 601 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
602 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
604
c3eefaf6
EG
605 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
606 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
607 }
608
3196a88a
EG
609 start = RX_SGE(fp->rx_sge_prod);
610 end = RX_SGE(fp->last_max_sge);
8440d2b6 611 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
612 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
614
c3eefaf6
EG
615 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
616 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
617 }
618
a2fbb9ea
ET
619 start = RCQ_BD(fp->rx_comp_cons - 10);
620 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 621 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
622 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
623
c3eefaf6
EG
624 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
626 }
627 }
628
8440d2b6 629 /* Tx */
54b9ddaa 630 for_each_queue(bp, i) {
8440d2b6
EG
631 struct bnx2x_fastpath *fp = &bp->fp[i];
632
633 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635 for (j = start; j != end; j = TX_BD(j + 1)) {
636 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
637
c3eefaf6
EG
638 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
640 }
641
642 start = TX_BD(fp->tx_bd_cons - 10);
643 end = TX_BD(fp->tx_bd_cons + 254);
644 for (j = start; j != end; j = TX_BD(j + 1)) {
645 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
646
c3eefaf6
EG
647 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
649 }
650 }
a2fbb9ea 651
34f80b04 652 bnx2x_fw_dump(bp);
a2fbb9ea
ET
653 bnx2x_mc_assert(bp);
654 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
655}
656
615f8fd9 657static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 658{
34f80b04 659 int port = BP_PORT(bp);
a2fbb9ea
ET
660 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661 u32 val = REG_RD(bp, addr);
662 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 663 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
664
665 if (msix) {
8badd27a
EG
666 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
668 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
670 } else if (msi) {
671 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
675 } else {
676 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 677 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
678 HC_CONFIG_0_REG_INT_LINE_EN_0 |
679 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 680
8badd27a
EG
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
682 val, port, addr);
615f8fd9
ET
683
684 REG_WR(bp, addr, val);
685
a2fbb9ea
ET
686 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
687 }
688
8badd27a
EG
689 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
690 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
691
692 REG_WR(bp, addr, val);
37dbbf32
EG
693 /*
694 * Ensure that HC_CONFIG is written before leading/trailing edge config
695 */
696 mmiowb();
697 barrier();
34f80b04
EG
698
699 if (CHIP_IS_E1H(bp)) {
700 /* init leading/trailing edge */
701 if (IS_E1HMF(bp)) {
8badd27a 702 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 703 if (bp->port.pmf)
4acac6a5
EG
704 /* enable nig and gpio3 attention */
705 val |= 0x1100;
34f80b04
EG
706 } else
707 val = 0xffff;
708
709 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
711 }
37dbbf32
EG
712
713 /* Make sure that interrupts are indeed enabled from here on */
714 mmiowb();
a2fbb9ea
ET
715}
716
615f8fd9 717static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 718{
34f80b04 719 int port = BP_PORT(bp);
a2fbb9ea
ET
720 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721 u32 val = REG_RD(bp, addr);
722
723 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725 HC_CONFIG_0_REG_INT_LINE_EN_0 |
726 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
727
728 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
729 val, port, addr);
730
8badd27a
EG
731 /* flush all outstanding writes */
732 mmiowb();
733
a2fbb9ea
ET
734 REG_WR(bp, addr, val);
735 if (REG_RD(bp, addr) != val)
736 BNX2X_ERR("BUG! proper val not read from IGU!\n");
737}
738
f8ef6e44 739static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 740{
a2fbb9ea 741 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 742 int i, offset;
a2fbb9ea 743
34f80b04 744 /* disable interrupt handling */
a2fbb9ea 745 atomic_inc(&bp->intr_sem);
e1510706
EG
746 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
747
f8ef6e44
YG
748 if (disable_hw)
749 /* prevent the HW from sending interrupts */
750 bnx2x_int_disable(bp);
a2fbb9ea
ET
751
752 /* make sure all ISRs are done */
753 if (msix) {
8badd27a
EG
754 synchronize_irq(bp->msix_table[0].vector);
755 offset = 1;
37b091ba
MC
756#ifdef BCM_CNIC
757 offset++;
758#endif
a2fbb9ea 759 for_each_queue(bp, i)
8badd27a 760 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
761 } else
762 synchronize_irq(bp->pdev->irq);
763
764 /* make sure sp_task is not running */
1cf167f2
EG
765 cancel_delayed_work(&bp->sp_task);
766 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
767}
768
34f80b04 769/* fast path */
a2fbb9ea
ET
770
771/*
34f80b04 772 * General service functions
a2fbb9ea
ET
773 */
774
72fd0718
VZ
775/* Return true if succeeded to acquire the lock */
776static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
777{
778 u32 lock_status;
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
782
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
784
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
787 DP(NETIF_MSG_HW,
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
790 return -EINVAL;
791 }
792
793 if (func <= 5)
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
795 else
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
798
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
803 return true;
804
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
806 return false;
807}
808
34f80b04 809static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
810 u8 storm, u16 index, u8 op, u8 update)
811{
5c862848
EG
812 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
814 struct igu_ack_register igu_ack;
815
816 igu_ack.status_block_index = index;
817 igu_ack.sb_id_and_flags =
34f80b04 818 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
819 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
822
5c862848
EG
823 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824 (*(u32 *)&igu_ack), hc_addr);
825 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
37dbbf32
EG
826
827 /* Make sure that ACK is written */
828 mmiowb();
829 barrier();
a2fbb9ea
ET
830}
831
54b9ddaa 832static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
833{
834 struct host_status_block *fpsb = fp->status_blk;
a2fbb9ea
ET
835
836 barrier(); /* status block is written to by the chip */
54b9ddaa
VZ
837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
a2fbb9ea
ET
839}
840
a2fbb9ea
ET
841static u16 bnx2x_ack_int(struct bnx2x *bp)
842{
5c862848
EG
843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 846
5c862848
EG
847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
848 result, hc_addr);
a2fbb9ea 849
a2fbb9ea
ET
850 return result;
851}
852
853
854/*
855 * fast path service functions
856 */
857
e8b5fc51
VZ
858static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
859{
860 /* Tell compiler that consumer and producer can change */
861 barrier();
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
237907c1
EG
863}
864
a2fbb9ea
ET
865/* free skb in the packet ring at pos idx
866 * return idx of last bd freed
867 */
868static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
869 u16 idx)
870{
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 874 struct sk_buff *skb = tx_buf->skb;
34f80b04 875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
876 int nbd;
877
54b9ddaa
VZ
878 /* prefetch skb end pointer to speedup dev_kfree_skb() */
879 prefetch(&skb->end);
880
a2fbb9ea
ET
881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
882 idx, tx_buf, skb);
883
884 /* unmap first bd */
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c 886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
1a983142 887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
ca00392c 888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 889
ca00392c 890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 891#ifdef BNX2X_STOP_ON_ERROR
ca00392c 892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 893 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
894 bnx2x_panic();
895 }
896#endif
ca00392c 897 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 898
ca00392c
EG
899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 901
ca00392c
EG
902 /* Skip a parse bd... */
903 --nbd;
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
905
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
908 --nbd;
909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
910 }
911
912 /* now free frags */
913 while (nbd > 0) {
914
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c 916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
1a983142
FT
917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
a2fbb9ea
ET
919 if (--nbd)
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
921 }
922
923 /* release skb */
53e5e96e 924 WARN_ON(!skb);
54b9ddaa 925 dev_kfree_skb(skb);
a2fbb9ea
ET
926 tx_buf->first_bd = 0;
927 tx_buf->skb = NULL;
928
34f80b04 929 return new_cons;
a2fbb9ea
ET
930}
931
34f80b04 932static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 933{
34f80b04
EG
934 s16 used;
935 u16 prod;
936 u16 cons;
a2fbb9ea 937
a2fbb9ea
ET
938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
940
34f80b04
EG
941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 944
34f80b04 945#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
946 WARN_ON(used < 0);
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 949#endif
a2fbb9ea 950
34f80b04 951 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
952}
953
54b9ddaa
VZ
954static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
955{
956 u16 hw_cons;
957
958 /* Tell compiler that status block fields can change */
959 barrier();
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
962}
963
964static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
965{
966 struct bnx2x *bp = fp->bp;
555f6c78 967 struct netdev_queue *txq;
a2fbb9ea 968 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
a2fbb9ea
ET
969
970#ifdef BNX2X_STOP_ON_ERROR
971 if (unlikely(bp->panic))
54b9ddaa 972 return -1;
a2fbb9ea
ET
973#endif
974
54b9ddaa 975 txq = netdev_get_tx_queue(bp->dev, fp->index);
a2fbb9ea
ET
976 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977 sw_cons = fp->tx_pkt_cons;
978
979 while (sw_cons != hw_cons) {
980 u16 pkt_cons;
981
982 pkt_cons = TX_BD(sw_cons);
983
984 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
985
34f80b04 986 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
987 hw_cons, sw_cons, pkt_cons);
988
34f80b04 989/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
990 rmb();
991 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
992 }
993*/
994 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
995 sw_cons++;
a2fbb9ea
ET
996 }
997
998 fp->tx_pkt_cons = sw_cons;
999 fp->tx_bd_cons = bd_cons;
1000
c16cc0b4
VZ
1001 /* Need to make the tx_bd_cons update visible to start_xmit()
1002 * before checking for netif_tx_queue_stopped(). Without the
1003 * memory barrier, there is a small possibility that
1004 * start_xmit() will miss it and cause the queue to be stopped
1005 * forever.
1006 */
2d99cf16 1007 smp_mb();
c16cc0b4 1008
a2fbb9ea 1009 /* TBD need a thresh? */
555f6c78 1010 if (unlikely(netif_tx_queue_stopped(txq))) {
c16cc0b4
VZ
1011 /* Taking tx_lock() is needed to prevent reenabling the queue
1012 * while it's empty. This could have happen if rx_action() gets
1013 * suspended in bnx2x_tx_int() after the condition before
1014 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1015 *
1016 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017 * sends some packets consuming the whole queue again->
1018 * stops the queue
6044735d 1019 */
c16cc0b4
VZ
1020
1021 __netif_tx_lock(txq, smp_processor_id());
6044735d 1022
555f6c78 1023 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 1024 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 1025 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 1026 netif_tx_wake_queue(txq);
c16cc0b4
VZ
1027
1028 __netif_tx_unlock(txq);
a2fbb9ea 1029 }
54b9ddaa 1030 return 0;
a2fbb9ea
ET
1031}
1032
993ac7b5
MC
1033#ifdef BCM_CNIC
1034static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1035#endif
3196a88a 1036
a2fbb9ea
ET
1037static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038 union eth_rx_cqe *rr_cqe)
1039{
1040 struct bnx2x *bp = fp->bp;
1041 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1043
34f80b04 1044 DP(BNX2X_MSG_SP,
a2fbb9ea 1045 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 1046 fp->index, cid, command, bp->state,
34f80b04 1047 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
1048
1049 bp->spq_left++;
1050
0626b899 1051 if (fp->index) {
a2fbb9ea
ET
1052 switch (command | fp->state) {
1053 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054 BNX2X_FP_STATE_OPENING):
1055 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1056 cid);
1057 fp->state = BNX2X_FP_STATE_OPEN;
1058 break;
1059
1060 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1062 cid);
1063 fp->state = BNX2X_FP_STATE_HALTED;
1064 break;
1065
1066 default:
34f80b04 1067 BNX2X_ERR("unexpected MC reply (%d) "
cdaa7cb8
VZ
1068 "fp[%d] state is %x\n",
1069 command, fp->index, fp->state);
34f80b04 1070 break;
a2fbb9ea 1071 }
34f80b04 1072 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1073 return;
1074 }
c14423fe 1075
a2fbb9ea
ET
1076 switch (command | bp->state) {
1077 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079 bp->state = BNX2X_STATE_OPEN;
1080 break;
1081
1082 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085 fp->state = BNX2X_FP_STATE_HALTED;
1086 break;
1087
a2fbb9ea 1088 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1089 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 1090 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
1091 break;
1092
993ac7b5
MC
1093#ifdef BCM_CNIC
1094 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096 bnx2x_cnic_cfc_comp(bp, cid);
1097 break;
1098#endif
3196a88a 1099
a2fbb9ea 1100 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 1101 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 1102 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
e665bfda
MC
1103 bp->set_mac_pending--;
1104 smp_wmb();
a2fbb9ea
ET
1105 break;
1106
49d66772 1107 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1108 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
e665bfda
MC
1109 bp->set_mac_pending--;
1110 smp_wmb();
49d66772
ET
1111 break;
1112
a2fbb9ea 1113 default:
34f80b04 1114 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 1115 command, bp->state);
34f80b04 1116 break;
a2fbb9ea 1117 }
34f80b04 1118 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1119}
1120
7a9b2557
VZ
1121static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122 struct bnx2x_fastpath *fp, u16 index)
1123{
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1127
1128 /* Skip "next page" elements */
1129 if (!page)
1130 return;
1131
1a983142 1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
4f40f2cb 1133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1135
1136 sw_buf->page = NULL;
1137 sge->addr_hi = 0;
1138 sge->addr_lo = 0;
1139}
1140
1141static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, int last)
1143{
1144 int i;
1145
1146 for (i = 0; i < last; i++)
1147 bnx2x_free_rx_sge(bp, fp, i);
1148}
1149
1150static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1152{
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1156 dma_addr_t mapping;
1157
1158 if (unlikely(page == NULL))
1159 return -ENOMEM;
1160
1a983142
FT
1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
8d8bb39b 1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1165 return -ENOMEM;
1166 }
1167
1168 sw_buf->page = page;
1a983142 1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
7a9b2557
VZ
1170
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1173
1174 return 0;
1175}
1176
a2fbb9ea
ET
1177static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1179{
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1183 dma_addr_t mapping;
1184
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1187 return -ENOMEM;
1188
1a983142
FT
1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1190 DMA_FROM_DEVICE);
8d8bb39b 1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1192 dev_kfree_skb(skb);
1193 return -ENOMEM;
1194 }
1195
1196 rx_buf->skb = skb;
1a983142 1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
a2fbb9ea
ET
1198
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1201
1202 return 0;
1203}
1204
1205/* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod
1207 * we are not creating a new mapping,
1208 * so there is no need to check for dma_mapping_error().
1209 */
1210static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1212{
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1218
1a983142
FT
1219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
a2fbb9ea
ET
1222
1223 prod_rx_buf->skb = cons_rx_buf->skb;
1a983142
FT
1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
a2fbb9ea
ET
1226 *prod_bd = *cons_bd;
1227}
1228
7a9b2557
VZ
1229static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1230 u16 idx)
1231{
1232 u16 last_max = fp->last_max_sge;
1233
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
1236}
1237
1238static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1239{
1240 int i, j;
1241
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1244
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1247 idx--;
1248 }
1249 }
1250}
1251
1252static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253 struct eth_fast_path_rx_cqe *fp_cqe)
1254{
1255 struct bnx2x *bp = fp->bp;
4f40f2cb 1256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1257 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1258 SGE_PAGE_SHIFT;
7a9b2557
VZ
1259 u16 last_max, last_elem, first_elem;
1260 u16 delta = 0;
1261 u16 i;
1262
1263 if (!sge_len)
1264 return;
1265
1266 /* First mark all used pages */
1267 for (i = 0; i < sge_len; i++)
1268 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1269
1270 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1272
1273 /* Here we assume that the last SGE index is the biggest */
1274 prefetch((void *)(fp->sge_mask));
1275 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1276
1277 last_max = RX_SGE(fp->last_max_sge);
1278 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1280
1281 /* If ring is not full */
1282 if (last_elem + 1 != first_elem)
1283 last_elem++;
1284
1285 /* Now update the prod */
1286 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287 if (likely(fp->sge_mask[i]))
1288 break;
1289
1290 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291 delta += RX_SGE_MASK_ELEM_SZ;
1292 }
1293
1294 if (delta > 0) {
1295 fp->rx_sge_prod += delta;
1296 /* clear page-end entries */
1297 bnx2x_clear_sge_mask_next_elems(fp);
1298 }
1299
1300 DP(NETIF_MSG_RX_STATUS,
1301 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1302 fp->last_max_sge, fp->rx_sge_prod);
1303}
1304
1305static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1306{
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1310
33471629
EG
1311 /* Clear the two last indices in the page to 1:
1312 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1313 hence will never be indicated and should be removed from
1314 the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
1316}
1317
1318static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319 struct sk_buff *skb, u16 cons, u16 prod)
1320{
1321 struct bnx2x *bp = fp->bp;
1322 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1325 dma_addr_t mapping;
1326
1327 /* move empty skb from pool to prod and map it */
1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1a983142
FT
1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
7a9b2557
VZ
1332
1333 /* move partial skb from cons to pool (don't unmap yet) */
1334 fp->tpa_pool[queue] = *cons_rx_buf;
1335
1336 /* mark bin state as start - print error if current state != stop */
1337 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1339
1340 fp->tpa_state[queue] = BNX2X_TPA_START;
1341
1342 /* point prod_bd to new skb */
1343 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1345
1346#ifdef BNX2X_STOP_ON_ERROR
1347 fp->tpa_queue_used |= (1 << queue);
cdaa7cb8 1348#ifdef _ASM_GENERIC_INT_L64_H
7a9b2557
VZ
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1350#else
1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1352#endif
1353 fp->tpa_queue_used);
1354#endif
1355}
1356
1357static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358 struct sk_buff *skb,
1359 struct eth_fast_path_rx_cqe *fp_cqe,
1360 u16 cqe_idx)
1361{
1362 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1363 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364 u32 i, frag_len, frag_size, pages;
1365 int err;
1366 int j;
1367
1368 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1369 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1370
1371 /* This is needed in order to enable forwarding support */
1372 if (frag_size)
4f40f2cb 1373 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1374 max(frag_size, (u32)len_on_bd));
1375
1376#ifdef BNX2X_STOP_ON_ERROR
cdaa7cb8 1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
7a9b2557
VZ
1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1379 pages, cqe_idx);
1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1381 fp_cqe->pkt_len, len_on_bd);
1382 bnx2x_panic();
1383 return -EINVAL;
1384 }
1385#endif
1386
1387 /* Run through the SGL and compose the fragmented skb */
1388 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1390
1391 /* FW gives the indices of the SGE as if the ring is an array
1392 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1393 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1394 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1395 old_rx_pg = *rx_pg;
1396
1397 /* If we fail to allocate a substitute page, we simply stop
1398 where we are and drop the whole packet */
1399 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400 if (unlikely(err)) {
de832a55 1401 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1402 return err;
1403 }
1404
1405 /* Unmap the page as we r going to pass it to the stack */
1a983142
FT
1406 dma_unmap_page(&bp->pdev->dev,
1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
7a9b2557
VZ
1409
1410 /* Add one frag and update the appropriate fields in the skb */
1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1412
1413 skb->data_len += frag_len;
1414 skb->truesize += frag_len;
1415 skb->len += frag_len;
1416
1417 frag_size -= frag_len;
1418 }
1419
1420 return 0;
1421}
1422
1423static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425 u16 cqe_idx)
1426{
1427 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428 struct sk_buff *skb = rx_buf->skb;
1429 /* alloc new skb */
1430 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432 /* Unmap skb in the pool anyway, as we are going to change
1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434 fails. */
1a983142
FT
1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436 bp->rx_buf_size, DMA_FROM_DEVICE);
7a9b2557 1437
7a9b2557 1438 if (likely(new_skb)) {
66e855f3
YG
1439 /* fix ip xsum and give it to the stack */
1440 /* (no need to map the new skb) */
0c6671b0
EG
1441#ifdef BCM_VLAN
1442 int is_vlan_cqe =
1443 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444 PARSING_FLAGS_VLAN);
1445 int is_not_hwaccel_vlan_cqe =
1446 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447#endif
7a9b2557
VZ
1448
1449 prefetch(skb);
1450 prefetch(((char *)(skb)) + 128);
1451
7a9b2557
VZ
1452#ifdef BNX2X_STOP_ON_ERROR
1453 if (pad + len > bp->rx_buf_size) {
1454 BNX2X_ERR("skb_put is about to fail... "
1455 "pad %d len %d rx_buf_size %d\n",
1456 pad, len, bp->rx_buf_size);
1457 bnx2x_panic();
1458 return;
1459 }
1460#endif
1461
1462 skb_reserve(skb, pad);
1463 skb_put(skb, len);
1464
1465 skb->protocol = eth_type_trans(skb, bp->dev);
1466 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468 {
1469 struct iphdr *iph;
1470
1471 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1472#ifdef BCM_VLAN
1473 /* If there is no Rx VLAN offloading -
1474 take VLAN tag into an account */
1475 if (unlikely(is_not_hwaccel_vlan_cqe))
1476 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477#endif
7a9b2557
VZ
1478 iph->check = 0;
1479 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480 }
1481
1482 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483 &cqe->fast_path_cqe, cqe_idx)) {
1484#ifdef BCM_VLAN
0c6671b0
EG
1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486 (!is_not_hwaccel_vlan_cqe))
4fd89b7a
DK
1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488 le16_to_cpu(cqe->fast_path_cqe.
1489 vlan_tag), skb);
7a9b2557
VZ
1490 else
1491#endif
4fd89b7a 1492 napi_gro_receive(&fp->napi, skb);
7a9b2557
VZ
1493 } else {
1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495 " - dropping packet!\n");
1496 dev_kfree_skb(skb);
1497 }
1498
7a9b2557
VZ
1499
1500 /* put new skb in bin */
1501 fp->tpa_pool[queue].skb = new_skb;
1502
1503 } else {
66e855f3 1504 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1505 DP(NETIF_MSG_RX_STATUS,
1506 "Failed to allocate new skb - dropping packet!\n");
de832a55 1507 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1508 }
1509
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511}
1512
1513static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1516 u16 rx_sge_prod)
1517{
8d9c5f34 1518 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1519 int i;
1520
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1525
58f4c4cf
EG
1526 /*
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1529 * is updated.
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW will
1532 * assumes BDs must have buffers.
1533 */
1534 wmb();
1535
8d9c5f34
EG
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1539 ((u32 *)&rx_prods)[i]);
1540
58f4c4cf
EG
1541 mmiowb(); /* keep prod updates ordered */
1542
7a9b2557 1543 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1546}
1547
a2fbb9ea
ET
1548static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549{
1550 struct bnx2x *bp = fp->bp;
34f80b04 1551 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1552 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553 int rx_pkt = 0;
1554
1555#ifdef BNX2X_STOP_ON_ERROR
1556 if (unlikely(bp->panic))
1557 return 0;
1558#endif
1559
34f80b04
EG
1560 /* CQ "next element" is of the size of the regular element,
1561 that's why it's ok here */
a2fbb9ea
ET
1562 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564 hw_comp_cons++;
1565
1566 bd_cons = fp->rx_bd_cons;
1567 bd_prod = fp->rx_bd_prod;
34f80b04 1568 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1569 sw_comp_cons = fp->rx_comp_cons;
1570 sw_comp_prod = fp->rx_comp_prod;
1571
1572 /* Memory barrier necessary as speculative reads of the rx
1573 * buffer can be ahead of the index in the status block
1574 */
1575 rmb();
1576
1577 DP(NETIF_MSG_RX_STATUS,
1578 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1579 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1580
1581 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1582 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1583 struct sk_buff *skb;
1584 union eth_rx_cqe *cqe;
34f80b04
EG
1585 u8 cqe_fp_flags;
1586 u16 len, pad;
a2fbb9ea
ET
1587
1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589 bd_prod = RX_BD(bd_prod);
1590 bd_cons = RX_BD(bd_cons);
1591
619e7a66
EG
1592 /* Prefetch the page containing the BD descriptor
1593 at producer's index. It will be needed when new skb is
1594 allocated */
1595 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596 (&fp->rx_desc_ring[bd_prod])) -
1597 PAGE_SIZE + 1));
1598
a2fbb9ea 1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1601
a2fbb9ea 1602 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1603 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1604 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1605 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1606 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1607 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1608
1609 /* is this a slowpath msg? */
34f80b04 1610 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1611 bnx2x_sp_event(fp, cqe);
1612 goto next_cqe;
1613
1614 /* this is an rx packet */
1615 } else {
1616 rx_buf = &fp->rx_buf_ring[bd_cons];
1617 skb = rx_buf->skb;
54b9ddaa
VZ
1618 prefetch(skb);
1619 prefetch((u8 *)skb + 256);
a2fbb9ea
ET
1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621 pad = cqe->fast_path_cqe.placement_offset;
1622
7a9b2557
VZ
1623 /* If CQE is marked both TPA_START and TPA_END
1624 it is a non-TPA CQE */
1625 if ((!fp->disable_tpa) &&
1626 (TPA_TYPE(cqe_fp_flags) !=
1627 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1628 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1629
1630 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631 DP(NETIF_MSG_RX_STATUS,
1632 "calling tpa_start on queue %d\n",
1633 queue);
1634
1635 bnx2x_tpa_start(fp, queue, skb,
1636 bd_cons, bd_prod);
1637 goto next_rx;
1638 }
1639
1640 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641 DP(NETIF_MSG_RX_STATUS,
1642 "calling tpa_stop on queue %d\n",
1643 queue);
1644
1645 if (!BNX2X_RX_SUM_FIX(cqe))
1646 BNX2X_ERR("STOP on none TCP "
1647 "data\n");
1648
1649 /* This is a size of the linear data
1650 on this skb */
1651 len = le16_to_cpu(cqe->fast_path_cqe.
1652 len_on_bd);
1653 bnx2x_tpa_stop(bp, fp, queue, pad,
1654 len, cqe, comp_ring_cons);
1655#ifdef BNX2X_STOP_ON_ERROR
1656 if (bp->panic)
17cb4006 1657 return 0;
7a9b2557
VZ
1658#endif
1659
1660 bnx2x_update_sge_prod(fp,
1661 &cqe->fast_path_cqe);
1662 goto next_cqe;
1663 }
1664 }
1665
1a983142
FT
1666 dma_sync_single_for_device(&bp->pdev->dev,
1667 dma_unmap_addr(rx_buf, mapping),
1668 pad + RX_COPY_THRESH,
1669 DMA_FROM_DEVICE);
a2fbb9ea
ET
1670 prefetch(skb);
1671 prefetch(((char *)(skb)) + 128);
1672
1673 /* is this an error packet? */
34f80b04 1674 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1675 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1676 "ERROR flags %x rx packet %u\n",
1677 cqe_fp_flags, sw_comp_cons);
de832a55 1678 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1679 goto reuse_rx;
1680 }
1681
1682 /* Since we don't have a jumbo ring
1683 * copy small packets if mtu > 1500
1684 */
1685 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1686 (len <= RX_COPY_THRESH)) {
1687 struct sk_buff *new_skb;
1688
1689 new_skb = netdev_alloc_skb(bp->dev,
1690 len + pad);
1691 if (new_skb == NULL) {
1692 DP(NETIF_MSG_RX_ERR,
34f80b04 1693 "ERROR packet dropped "
a2fbb9ea 1694 "because of alloc failure\n");
de832a55 1695 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1696 goto reuse_rx;
1697 }
1698
1699 /* aligned copy */
1700 skb_copy_from_linear_data_offset(skb, pad,
1701 new_skb->data + pad, len);
1702 skb_reserve(new_skb, pad);
1703 skb_put(new_skb, len);
1704
1705 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1706
1707 skb = new_skb;
1708
a119a069
EG
1709 } else
1710 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1a983142
FT
1711 dma_unmap_single(&bp->pdev->dev,
1712 dma_unmap_addr(rx_buf, mapping),
437cf2f1 1713 bp->rx_buf_size,
1a983142 1714 DMA_FROM_DEVICE);
a2fbb9ea
ET
1715 skb_reserve(skb, pad);
1716 skb_put(skb, len);
1717
1718 } else {
1719 DP(NETIF_MSG_RX_ERR,
34f80b04 1720 "ERROR packet dropped because "
a2fbb9ea 1721 "of alloc failure\n");
de832a55 1722 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1723reuse_rx:
1724 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1725 goto next_rx;
1726 }
1727
1728 skb->protocol = eth_type_trans(skb, bp->dev);
1729
1730 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1731 if (bp->rx_csum) {
1adcd8be
EG
1732 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1733 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1734 else
de832a55 1735 fp->eth_q_stats.hw_csum_err++;
66e855f3 1736 }
a2fbb9ea
ET
1737 }
1738
748e5439 1739 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1740
a2fbb9ea 1741#ifdef BCM_VLAN
0c6671b0 1742 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1743 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1744 PARSING_FLAGS_VLAN))
4fd89b7a
DK
1745 vlan_gro_receive(&fp->napi, bp->vlgrp,
1746 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
a2fbb9ea
ET
1747 else
1748#endif
4fd89b7a 1749 napi_gro_receive(&fp->napi, skb);
a2fbb9ea 1750
a2fbb9ea
ET
1751
1752next_rx:
1753 rx_buf->skb = NULL;
1754
1755 bd_cons = NEXT_RX_IDX(bd_cons);
1756 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1757 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1758 rx_pkt++;
a2fbb9ea
ET
1759next_cqe:
1760 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1761 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1762
34f80b04 1763 if (rx_pkt == budget)
a2fbb9ea
ET
1764 break;
1765 } /* while */
1766
1767 fp->rx_bd_cons = bd_cons;
34f80b04 1768 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1769 fp->rx_comp_cons = sw_comp_cons;
1770 fp->rx_comp_prod = sw_comp_prod;
1771
7a9b2557
VZ
1772 /* Update producers */
1773 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1774 fp->rx_sge_prod);
a2fbb9ea
ET
1775
1776 fp->rx_pkt += rx_pkt;
1777 fp->rx_calls++;
1778
1779 return rx_pkt;
1780}
1781
1782static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1783{
1784 struct bnx2x_fastpath *fp = fp_cookie;
1785 struct bnx2x *bp = fp->bp;
a2fbb9ea 1786
da5a662a
VZ
1787 /* Return here if interrupt is disabled */
1788 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1789 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1790 return IRQ_HANDLED;
1791 }
1792
34f80b04 1793 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1794 fp->index, fp->sb_id);
0626b899 1795 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1796
1797#ifdef BNX2X_STOP_ON_ERROR
1798 if (unlikely(bp->panic))
1799 return IRQ_HANDLED;
1800#endif
ca00392c 1801
54b9ddaa
VZ
1802 /* Handle Rx and Tx according to MSI-X vector */
1803 prefetch(fp->rx_cons_sb);
1804 prefetch(fp->tx_cons_sb);
1805 prefetch(&fp->status_blk->u_status_block.status_block_index);
1806 prefetch(&fp->status_blk->c_status_block.status_block_index);
1807 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1808
a2fbb9ea
ET
1809 return IRQ_HANDLED;
1810}
1811
1812static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1813{
555f6c78 1814 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1815 u16 status = bnx2x_ack_int(bp);
34f80b04 1816 u16 mask;
ca00392c 1817 int i;
a2fbb9ea 1818
34f80b04 1819 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1820 if (unlikely(status == 0)) {
1821 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1822 return IRQ_NONE;
1823 }
f5372251 1824 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1825
34f80b04 1826 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1827 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1828 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1829 return IRQ_HANDLED;
1830 }
1831
3196a88a
EG
1832#ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1834 return IRQ_HANDLED;
1835#endif
1836
ca00392c
EG
1837 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1838 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1839
ca00392c
EG
1840 mask = 0x2 << fp->sb_id;
1841 if (status & mask) {
54b9ddaa
VZ
1842 /* Handle Rx and Tx according to SB id */
1843 prefetch(fp->rx_cons_sb);
1844 prefetch(&fp->status_blk->u_status_block.
1845 status_block_index);
1846 prefetch(fp->tx_cons_sb);
1847 prefetch(&fp->status_blk->c_status_block.
1848 status_block_index);
1849 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1850 status &= ~mask;
1851 }
a2fbb9ea
ET
1852 }
1853
993ac7b5
MC
1854#ifdef BCM_CNIC
1855 mask = 0x2 << CNIC_SB_ID(bp);
1856 if (status & (mask | 0x1)) {
1857 struct cnic_ops *c_ops = NULL;
1858
1859 rcu_read_lock();
1860 c_ops = rcu_dereference(bp->cnic_ops);
1861 if (c_ops)
1862 c_ops->cnic_handler(bp->cnic_data, NULL);
1863 rcu_read_unlock();
1864
1865 status &= ~mask;
1866 }
1867#endif
a2fbb9ea 1868
34f80b04 1869 if (unlikely(status & 0x1)) {
1cf167f2 1870 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1871
1872 status &= ~0x1;
1873 if (!status)
1874 return IRQ_HANDLED;
1875 }
1876
cdaa7cb8
VZ
1877 if (unlikely(status))
1878 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1879 status);
a2fbb9ea 1880
c18487ee 1881 return IRQ_HANDLED;
a2fbb9ea
ET
1882}
1883
c18487ee 1884/* end of fast path */
a2fbb9ea 1885
bb2a0f7a 1886static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1887
c18487ee
YR
1888/* Link */
1889
1890/*
1891 * General service functions
1892 */
a2fbb9ea 1893
4a37fb66 1894static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1895{
1896 u32 lock_status;
1897 u32 resource_bit = (1 << resource);
4a37fb66
YG
1898 int func = BP_FUNC(bp);
1899 u32 hw_lock_control_reg;
c18487ee 1900 int cnt;
a2fbb9ea 1901
c18487ee
YR
1902 /* Validating that the resource is within range */
1903 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1904 DP(NETIF_MSG_HW,
1905 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1906 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1907 return -EINVAL;
1908 }
a2fbb9ea 1909
4a37fb66
YG
1910 if (func <= 5) {
1911 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1912 } else {
1913 hw_lock_control_reg =
1914 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1915 }
1916
c18487ee 1917 /* Validating that the resource is not already taken */
4a37fb66 1918 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1919 if (lock_status & resource_bit) {
1920 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1921 lock_status, resource_bit);
1922 return -EEXIST;
1923 }
a2fbb9ea 1924
46230476
EG
1925 /* Try for 5 second every 5ms */
1926 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1927 /* Try to acquire the lock */
4a37fb66
YG
1928 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1929 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1930 if (lock_status & resource_bit)
1931 return 0;
a2fbb9ea 1932
c18487ee 1933 msleep(5);
a2fbb9ea 1934 }
c18487ee
YR
1935 DP(NETIF_MSG_HW, "Timeout\n");
1936 return -EAGAIN;
1937}
a2fbb9ea 1938
4a37fb66 1939static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1940{
1941 u32 lock_status;
1942 u32 resource_bit = (1 << resource);
4a37fb66
YG
1943 int func = BP_FUNC(bp);
1944 u32 hw_lock_control_reg;
a2fbb9ea 1945
72fd0718
VZ
1946 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1947
c18487ee
YR
1948 /* Validating that the resource is within range */
1949 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1950 DP(NETIF_MSG_HW,
1951 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1952 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1953 return -EINVAL;
1954 }
1955
4a37fb66
YG
1956 if (func <= 5) {
1957 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1958 } else {
1959 hw_lock_control_reg =
1960 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1961 }
1962
c18487ee 1963 /* Validating that the resource is currently taken */
4a37fb66 1964 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1965 if (!(lock_status & resource_bit)) {
1966 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1967 lock_status, resource_bit);
1968 return -EFAULT;
a2fbb9ea
ET
1969 }
1970
4a37fb66 1971 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1972 return 0;
1973}
1974
1975/* HW Lock for shared dual port PHYs */
4a37fb66 1976static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1977{
34f80b04 1978 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1979
46c6a674
EG
1980 if (bp->port.need_hw_lock)
1981 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1982}
a2fbb9ea 1983
4a37fb66 1984static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1985{
46c6a674
EG
1986 if (bp->port.need_hw_lock)
1987 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1988
34f80b04 1989 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1990}
a2fbb9ea 1991
4acac6a5
EG
1992int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1993{
1994 /* The GPIO should be swapped if swap register is set and active */
1995 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1996 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1997 int gpio_shift = gpio_num +
1998 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1999 u32 gpio_mask = (1 << gpio_shift);
2000 u32 gpio_reg;
2001 int value;
2002
2003 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2004 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2005 return -EINVAL;
2006 }
2007
2008 /* read GPIO value */
2009 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2010
2011 /* get the requested pin value */
2012 if ((gpio_reg & gpio_mask) == gpio_mask)
2013 value = 1;
2014 else
2015 value = 0;
2016
2017 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2018
2019 return value;
2020}
2021
17de50b7 2022int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2023{
2024 /* The GPIO should be swapped if swap register is set and active */
2025 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2026 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2027 int gpio_shift = gpio_num +
2028 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2029 u32 gpio_mask = (1 << gpio_shift);
2030 u32 gpio_reg;
a2fbb9ea 2031
c18487ee
YR
2032 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2034 return -EINVAL;
2035 }
a2fbb9ea 2036
4a37fb66 2037 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2038 /* read GPIO and mask except the float bits */
2039 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2040
c18487ee
YR
2041 switch (mode) {
2042 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2043 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2044 gpio_num, gpio_shift);
2045 /* clear FLOAT and set CLR */
2046 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2048 break;
a2fbb9ea 2049
c18487ee
YR
2050 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2051 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2052 gpio_num, gpio_shift);
2053 /* clear FLOAT and set SET */
2054 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2056 break;
a2fbb9ea 2057
17de50b7 2058 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2059 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2060 gpio_num, gpio_shift);
2061 /* set FLOAT */
2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2063 break;
a2fbb9ea 2064
c18487ee
YR
2065 default:
2066 break;
a2fbb9ea
ET
2067 }
2068
c18487ee 2069 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2071
c18487ee 2072 return 0;
a2fbb9ea
ET
2073}
2074
4acac6a5
EG
2075int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2076{
2077 /* The GPIO should be swapped if swap register is set and active */
2078 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080 int gpio_shift = gpio_num +
2081 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082 u32 gpio_mask = (1 << gpio_shift);
2083 u32 gpio_reg;
2084
2085 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2086 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2087 return -EINVAL;
2088 }
2089
2090 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2091 /* read GPIO int */
2092 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2093
2094 switch (mode) {
2095 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2096 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2097 "output low\n", gpio_num, gpio_shift);
2098 /* clear SET and set CLR */
2099 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2100 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2101 break;
2102
2103 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2104 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2105 "output high\n", gpio_num, gpio_shift);
2106 /* clear CLR and set SET */
2107 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2109 break;
2110
2111 default:
2112 break;
2113 }
2114
2115 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2116 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2117
2118 return 0;
2119}
2120
c18487ee 2121static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2122{
c18487ee
YR
2123 u32 spio_mask = (1 << spio_num);
2124 u32 spio_reg;
a2fbb9ea 2125
c18487ee
YR
2126 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2127 (spio_num > MISC_REGISTERS_SPIO_7)) {
2128 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2129 return -EINVAL;
a2fbb9ea
ET
2130 }
2131
4a37fb66 2132 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2133 /* read SPIO and mask except the float bits */
2134 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2135
c18487ee 2136 switch (mode) {
6378c025 2137 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2138 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2139 /* clear FLOAT and set CLR */
2140 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2142 break;
a2fbb9ea 2143
6378c025 2144 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2145 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2146 /* clear FLOAT and set SET */
2147 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2149 break;
a2fbb9ea 2150
c18487ee
YR
2151 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2152 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2153 /* set FLOAT */
2154 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2155 break;
a2fbb9ea 2156
c18487ee
YR
2157 default:
2158 break;
a2fbb9ea
ET
2159 }
2160
c18487ee 2161 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2163
a2fbb9ea
ET
2164 return 0;
2165}
2166
c18487ee 2167static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2168{
ad33ea3a
EG
2169 switch (bp->link_vars.ieee_fc &
2170 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2171 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2172 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2173 ADVERTISED_Pause);
2174 break;
356e2385 2175
c18487ee 2176 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2177 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2178 ADVERTISED_Pause);
2179 break;
356e2385 2180
c18487ee 2181 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2182 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2183 break;
356e2385 2184
c18487ee 2185 default:
34f80b04 2186 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2187 ADVERTISED_Pause);
2188 break;
2189 }
2190}
f1410647 2191
c18487ee
YR
2192static void bnx2x_link_report(struct bnx2x *bp)
2193{
f34d28ea 2194 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2195 netif_carrier_off(bp->dev);
7995c64e 2196 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2197 return;
2198 }
2199
c18487ee 2200 if (bp->link_vars.link_up) {
35c5f8fe
EG
2201 u16 line_speed;
2202
c18487ee
YR
2203 if (bp->state == BNX2X_STATE_OPEN)
2204 netif_carrier_on(bp->dev);
7995c64e 2205 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2206
35c5f8fe
EG
2207 line_speed = bp->link_vars.line_speed;
2208 if (IS_E1HMF(bp)) {
2209 u16 vn_max_rate;
2210
2211 vn_max_rate =
2212 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2213 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2214 if (vn_max_rate < line_speed)
2215 line_speed = vn_max_rate;
2216 }
7995c64e 2217 pr_cont("%d Mbps ", line_speed);
f1410647 2218
c18487ee 2219 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2220 pr_cont("full duplex");
c18487ee 2221 else
7995c64e 2222 pr_cont("half duplex");
f1410647 2223
c0700f90
DM
2224 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2225 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2226 pr_cont(", receive ");
356e2385
EG
2227 if (bp->link_vars.flow_ctrl &
2228 BNX2X_FLOW_CTRL_TX)
7995c64e 2229 pr_cont("& transmit ");
c18487ee 2230 } else {
7995c64e 2231 pr_cont(", transmit ");
c18487ee 2232 }
7995c64e 2233 pr_cont("flow control ON");
c18487ee 2234 }
7995c64e 2235 pr_cont("\n");
f1410647 2236
c18487ee
YR
2237 } else { /* link_down */
2238 netif_carrier_off(bp->dev);
7995c64e 2239 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2240 }
c18487ee
YR
2241}
2242
b5bf9068 2243static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2244{
19680c48
EG
2245 if (!BP_NOMCP(bp)) {
2246 u8 rc;
a2fbb9ea 2247
19680c48 2248 /* Initialize link parameters structure variables */
8c99e7b0
YR
2249 /* It is recommended to turn off RX FC for jumbo frames
2250 for better performance */
0c593270 2251 if (bp->dev->mtu > 5000)
c0700f90 2252 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2253 else
c0700f90 2254 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2255
4a37fb66 2256 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2257
2258 if (load_mode == LOAD_DIAG)
2259 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2260
19680c48 2261 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2262
4a37fb66 2263 bnx2x_release_phy_lock(bp);
a2fbb9ea 2264
3c96c68b
EG
2265 bnx2x_calc_fc_adv(bp);
2266
b5bf9068
EG
2267 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2268 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2269 bnx2x_link_report(bp);
b5bf9068 2270 }
34f80b04 2271
19680c48
EG
2272 return rc;
2273 }
f5372251 2274 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2275 return -EINVAL;
a2fbb9ea
ET
2276}
2277
c18487ee 2278static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2279{
19680c48 2280 if (!BP_NOMCP(bp)) {
4a37fb66 2281 bnx2x_acquire_phy_lock(bp);
19680c48 2282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2283 bnx2x_release_phy_lock(bp);
a2fbb9ea 2284
19680c48
EG
2285 bnx2x_calc_fc_adv(bp);
2286 } else
f5372251 2287 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2288}
a2fbb9ea 2289
c18487ee
YR
2290static void bnx2x__link_reset(struct bnx2x *bp)
2291{
19680c48 2292 if (!BP_NOMCP(bp)) {
4a37fb66 2293 bnx2x_acquire_phy_lock(bp);
589abe3a 2294 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2295 bnx2x_release_phy_lock(bp);
19680c48 2296 } else
f5372251 2297 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2298}
a2fbb9ea 2299
c18487ee
YR
2300static u8 bnx2x_link_test(struct bnx2x *bp)
2301{
2145a920 2302 u8 rc = 0;
a2fbb9ea 2303
2145a920
VZ
2304 if (!BP_NOMCP(bp)) {
2305 bnx2x_acquire_phy_lock(bp);
2306 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307 bnx2x_release_phy_lock(bp);
2308 } else
2309 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 2310
c18487ee
YR
2311 return rc;
2312}
a2fbb9ea 2313
8a1c38d1 2314static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2315{
8a1c38d1
EG
2316 u32 r_param = bp->link_vars.line_speed / 8;
2317 u32 fair_periodic_timeout_usec;
2318 u32 t_fair;
34f80b04 2319
8a1c38d1
EG
2320 memset(&(bp->cmng.rs_vars), 0,
2321 sizeof(struct rate_shaping_vars_per_port));
2322 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2323
8a1c38d1
EG
2324 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2326
8a1c38d1
EG
2327 /* this is the threshold below which no timer arming will occur
2328 1.25 coefficient is for the threshold to be a little bigger
2329 than the real time, to compensate for timer in-accuracy */
2330 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2331 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2332
8a1c38d1
EG
2333 /* resolution of fairness timer */
2334 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2336 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2337
8a1c38d1
EG
2338 /* this is the threshold below which we won't arm the timer anymore */
2339 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2340
8a1c38d1
EG
2341 /* we multiply by 1e3/8 to get bytes/msec.
2342 We don't want the credits to pass a credit
2343 of the t_fair*FAIR_MEM (algorithm resolution) */
2344 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345 /* since each tick is 4 usec */
2346 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2347}
2348
2691d51d
EG
2349/* Calculates the sum of vn_min_rates.
2350 It's needed for further normalizing of the min_rates.
2351 Returns:
2352 sum of vn_min_rates.
2353 or
2354 0 - if all the min_rates are 0.
2355 In the later case fainess algorithm should be deactivated.
2356 If not all min_rates are zero then those that are zeroes will be set to 1.
2357 */
2358static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2359{
2360 int all_zero = 1;
2361 int port = BP_PORT(bp);
2362 int vn;
2363
2364 bp->vn_weight_sum = 0;
2365 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366 int func = 2*vn + port;
2367 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2370
2371 /* Skip hidden vns */
2372 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2373 continue;
2374
2375 /* If min rate is zero - set it to 1 */
2376 if (!vn_min_rate)
2377 vn_min_rate = DEF_MIN_RATE;
2378 else
2379 all_zero = 0;
2380
2381 bp->vn_weight_sum += vn_min_rate;
2382 }
2383
2384 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2385 if (all_zero) {
2386 bp->cmng.flags.cmng_enables &=
2387 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2388 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2389 " fairness will be disabled\n");
2390 } else
2391 bp->cmng.flags.cmng_enables |=
2392 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2393}
2394
8a1c38d1 2395static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2396{
2397 struct rate_shaping_vars_per_vn m_rs_vn;
2398 struct fairness_vars_per_vn m_fair_vn;
2399 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400 u16 vn_min_rate, vn_max_rate;
2401 int i;
2402
2403 /* If function is hidden - set min and max to zeroes */
2404 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2405 vn_min_rate = 0;
2406 vn_max_rate = 0;
2407
2408 } else {
2409 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2411 /* If min rate is zero - set it to 1 */
2412 if (!vn_min_rate)
34f80b04
EG
2413 vn_min_rate = DEF_MIN_RATE;
2414 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2416 }
8a1c38d1 2417 DP(NETIF_MSG_IFUP,
b015e3d1 2418 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2419 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2420
2421 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2423
2424 /* global vn counter - maximal Mbps for this vn */
2425 m_rs_vn.vn_counter.rate = vn_max_rate;
2426
2427 /* quota - number of bytes transmitted in this period */
2428 m_rs_vn.vn_counter.quota =
2429 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2430
8a1c38d1 2431 if (bp->vn_weight_sum) {
34f80b04
EG
2432 /* credit for each period of the fairness algorithm:
2433 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2434 vn_weight_sum should not be larger than 10000, thus
2435 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2436 than zero */
34f80b04 2437 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2438 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439 (8 * bp->vn_weight_sum))),
2440 (bp->cmng.fair_vars.fair_threshold * 2));
2441 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2442 m_fair_vn.vn_credit_delta);
2443 }
2444
34f80b04
EG
2445 /* Store it to internal memory */
2446 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449 ((u32 *)(&m_rs_vn))[i]);
2450
2451 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_fair_vn))[i]);
2455}
2456
8a1c38d1 2457
c18487ee
YR
2458/* This function is called upon link interrupt */
2459static void bnx2x_link_attn(struct bnx2x *bp)
2460{
bb2a0f7a
YG
2461 /* Make sure that we are synced with the current statistics */
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463
c18487ee 2464 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2465
bb2a0f7a
YG
2466 if (bp->link_vars.link_up) {
2467
1c06328c 2468 /* dropless flow control */
a18f5128 2469 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2470 int port = BP_PORT(bp);
2471 u32 pause_enabled = 0;
2472
2473 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2474 pause_enabled = 1;
2475
2476 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2477 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2478 pause_enabled);
2479 }
2480
bb2a0f7a
YG
2481 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2482 struct host_port_stats *pstats;
2483
2484 pstats = bnx2x_sp(bp, port_stats);
2485 /* reset old bmac stats */
2486 memset(&(pstats->mac_stx[0]), 0,
2487 sizeof(struct mac_stx));
2488 }
f34d28ea 2489 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2490 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491 }
2492
c18487ee
YR
2493 /* indicate link status */
2494 bnx2x_link_report(bp);
34f80b04
EG
2495
2496 if (IS_E1HMF(bp)) {
8a1c38d1 2497 int port = BP_PORT(bp);
34f80b04 2498 int func;
8a1c38d1 2499 int vn;
34f80b04 2500
ab6ad5a4 2501 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2502 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2503 if (vn == BP_E1HVN(bp))
2504 continue;
2505
8a1c38d1 2506 func = ((vn << 1) | port);
34f80b04
EG
2507 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2508 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2509 }
34f80b04 2510
8a1c38d1
EG
2511 if (bp->link_vars.link_up) {
2512 int i;
2513
2514 /* Init rate shaping and fairness contexts */
2515 bnx2x_init_port_minmax(bp);
34f80b04 2516
34f80b04 2517 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2518 bnx2x_init_vn_minmax(bp, 2*vn + port);
2519
2520 /* Store it to internal memory */
2521 for (i = 0;
2522 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2523 REG_WR(bp, BAR_XSTRORM_INTMEM +
2524 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2525 ((u32 *)(&bp->cmng))[i]);
2526 }
34f80b04 2527 }
c18487ee 2528}
a2fbb9ea 2529
c18487ee
YR
2530static void bnx2x__link_status_update(struct bnx2x *bp)
2531{
f34d28ea 2532 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2533 return;
a2fbb9ea 2534
c18487ee 2535 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2536
bb2a0f7a
YG
2537 if (bp->link_vars.link_up)
2538 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2539 else
2540 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2541
2691d51d
EG
2542 bnx2x_calc_vn_weight_sum(bp);
2543
c18487ee
YR
2544 /* indicate link status */
2545 bnx2x_link_report(bp);
a2fbb9ea 2546}
a2fbb9ea 2547
34f80b04
EG
2548static void bnx2x_pmf_update(struct bnx2x *bp)
2549{
2550 int port = BP_PORT(bp);
2551 u32 val;
2552
2553 bp->port.pmf = 1;
2554 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2555
2556 /* enable nig attention */
2557 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2558 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2559 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2560
2561 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2562}
2563
c18487ee 2564/* end of Link */
a2fbb9ea
ET
2565
2566/* slow path */
2567
2568/*
2569 * General service functions
2570 */
2571
2691d51d
EG
2572/* send the MCP a request, block until there is a reply */
2573u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2574{
2575 int func = BP_FUNC(bp);
2576 u32 seq = ++bp->fw_seq;
2577 u32 rc = 0;
2578 u32 cnt = 1;
2579 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2580
c4ff7cbf 2581 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2582 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2583 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2584
2585 do {
2586 /* let the FW do it's magic ... */
2587 msleep(delay);
2588
2589 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2590
c4ff7cbf
EG
2591 /* Give the FW up to 5 second (500*10ms) */
2592 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2593
2594 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2595 cnt*delay, rc, seq);
2596
2597 /* is this a reply to our command? */
2598 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2599 rc &= FW_MSG_CODE_MASK;
2600 else {
2601 /* FW BUG! */
2602 BNX2X_ERR("FW failed to respond!\n");
2603 bnx2x_fw_dump(bp);
2604 rc = 0;
2605 }
c4ff7cbf 2606 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2607
2608 return rc;
2609}
2610
e665bfda 2611static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2612static void bnx2x_set_rx_mode(struct net_device *dev);
2613
2614static void bnx2x_e1h_disable(struct bnx2x *bp)
2615{
2616 int port = BP_PORT(bp);
2691d51d
EG
2617
2618 netif_tx_disable(bp->dev);
2691d51d
EG
2619
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
2691d51d
EG
2622 netif_carrier_off(bp->dev);
2623}
2624
2625static void bnx2x_e1h_enable(struct bnx2x *bp)
2626{
2627 int port = BP_PORT(bp);
2628
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
2691d51d
EG
2631 /* Tx queue should be only reenabled */
2632 netif_tx_wake_all_queues(bp->dev);
2633
061bc702
EG
2634 /*
2635 * Should not call netif_carrier_on since it will be called if the link
2636 * is up when checking for link state
2637 */
2691d51d
EG
2638}
2639
2640static void bnx2x_update_min_max(struct bnx2x *bp)
2641{
2642 int port = BP_PORT(bp);
2643 int vn, i;
2644
2645 /* Init rate shaping and fairness contexts */
2646 bnx2x_init_port_minmax(bp);
2647
2648 bnx2x_calc_vn_weight_sum(bp);
2649
2650 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2651 bnx2x_init_vn_minmax(bp, 2*vn + port);
2652
2653 if (bp->port.pmf) {
2654 int func;
2655
2656 /* Set the attention towards other drivers on the same port */
2657 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2658 if (vn == BP_E1HVN(bp))
2659 continue;
2660
2661 func = ((vn << 1) | port);
2662 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2663 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2664 }
2665
2666 /* Store it to internal memory */
2667 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2668 REG_WR(bp, BAR_XSTRORM_INTMEM +
2669 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2670 ((u32 *)(&bp->cmng))[i]);
2671 }
2672}
2673
2674static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2675{
2691d51d 2676 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2677
2678 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2679
f34d28ea
EG
2680 /*
2681 * This is the only place besides the function initialization
2682 * where the bp->flags can change so it is done without any
2683 * locks
2684 */
2691d51d
EG
2685 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2686 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2687 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2688
2689 bnx2x_e1h_disable(bp);
2690 } else {
2691 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2692 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2693
2694 bnx2x_e1h_enable(bp);
2695 }
2696 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2697 }
2698 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2699
2700 bnx2x_update_min_max(bp);
2701 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2702 }
2703
2704 /* Report results to MCP */
2705 if (dcc_event)
2706 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2707 else
2708 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2709}
2710
28912902
MC
2711/* must be called under the spq lock */
2712static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2713{
2714 struct eth_spe *next_spe = bp->spq_prod_bd;
2715
2716 if (bp->spq_prod_bd == bp->spq_last_bd) {
2717 bp->spq_prod_bd = bp->spq;
2718 bp->spq_prod_idx = 0;
2719 DP(NETIF_MSG_TIMER, "end of spq\n");
2720 } else {
2721 bp->spq_prod_bd++;
2722 bp->spq_prod_idx++;
2723 }
2724 return next_spe;
2725}
2726
2727/* must be called under the spq lock */
2728static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2729{
2730 int func = BP_FUNC(bp);
2731
2732 /* Make sure that BD data is updated before writing the producer */
2733 wmb();
2734
2735 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2736 bp->spq_prod_idx);
2737 mmiowb();
2738}
2739
a2fbb9ea
ET
2740/* the slow path queue is odd since completions arrive on the fastpath ring */
2741static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2742 u32 data_hi, u32 data_lo, int common)
2743{
28912902 2744 struct eth_spe *spe;
a2fbb9ea 2745
a2fbb9ea
ET
2746#ifdef BNX2X_STOP_ON_ERROR
2747 if (unlikely(bp->panic))
2748 return -EIO;
2749#endif
2750
34f80b04 2751 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2752
2753 if (!bp->spq_left) {
2754 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2755 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2756 bnx2x_panic();
2757 return -EBUSY;
2758 }
f1410647 2759
28912902
MC
2760 spe = bnx2x_sp_get_next(bp);
2761
a2fbb9ea 2762 /* CID needs port number to be encoded int it */
28912902 2763 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2764 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2765 HW_CID(bp, cid));
28912902 2766 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2767 if (common)
28912902 2768 spe->hdr.type |=
a2fbb9ea
ET
2769 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2770
28912902
MC
2771 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2772 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2773
2774 bp->spq_left--;
2775
cdaa7cb8
VZ
2776 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2778 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2779 (u32)(U64_LO(bp->spq_mapping) +
2780 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2781 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2782
28912902 2783 bnx2x_sp_prod_update(bp);
34f80b04 2784 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2785 return 0;
2786}
2787
2788/* acquire split MCP access lock register */
4a37fb66 2789static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2790{
72fd0718 2791 u32 j, val;
34f80b04 2792 int rc = 0;
a2fbb9ea
ET
2793
2794 might_sleep();
72fd0718 2795 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2796 val = (1UL << 31);
2797 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2798 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2799 if (val & (1L << 31))
2800 break;
2801
2802 msleep(5);
2803 }
a2fbb9ea 2804 if (!(val & (1L << 31))) {
19680c48 2805 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2806 rc = -EBUSY;
2807 }
2808
2809 return rc;
2810}
2811
4a37fb66
YG
2812/* release split MCP access lock register */
2813static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2814{
72fd0718 2815 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2816}
2817
2818static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2819{
2820 struct host_def_status_block *def_sb = bp->def_status_blk;
2821 u16 rc = 0;
2822
2823 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2824 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2825 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2826 rc |= 1;
2827 }
2828 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2829 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2830 rc |= 2;
2831 }
2832 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2833 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2834 rc |= 4;
2835 }
2836 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2837 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2838 rc |= 8;
2839 }
2840 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2841 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2842 rc |= 16;
2843 }
2844 return rc;
2845}
2846
2847/*
2848 * slow path service functions
2849 */
2850
2851static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2852{
34f80b04 2853 int port = BP_PORT(bp);
5c862848
EG
2854 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2855 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2856 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2857 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2858 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2859 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2860 u32 aeu_mask;
87942b46 2861 u32 nig_mask = 0;
a2fbb9ea 2862
a2fbb9ea
ET
2863 if (bp->attn_state & asserted)
2864 BNX2X_ERR("IGU ERROR\n");
2865
3fcaf2e5
EG
2866 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867 aeu_mask = REG_RD(bp, aeu_addr);
2868
a2fbb9ea 2869 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2870 aeu_mask, asserted);
72fd0718 2871 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2872 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2873
3fcaf2e5
EG
2874 REG_WR(bp, aeu_addr, aeu_mask);
2875 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2876
3fcaf2e5 2877 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2878 bp->attn_state |= asserted;
3fcaf2e5 2879 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2880
2881 if (asserted & ATTN_HARD_WIRED_MASK) {
2882 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2883
a5e9a7cf
EG
2884 bnx2x_acquire_phy_lock(bp);
2885
877e9aa4 2886 /* save nig interrupt mask */
87942b46 2887 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2888 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2889
c18487ee 2890 bnx2x_link_attn(bp);
a2fbb9ea
ET
2891
2892 /* handle unicore attn? */
2893 }
2894 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2896
2897 if (asserted & GPIO_2_FUNC)
2898 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2899
2900 if (asserted & GPIO_3_FUNC)
2901 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2902
2903 if (asserted & GPIO_4_FUNC)
2904 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2905
2906 if (port == 0) {
2907 if (asserted & ATTN_GENERAL_ATTN_1) {
2908 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2910 }
2911 if (asserted & ATTN_GENERAL_ATTN_2) {
2912 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2914 }
2915 if (asserted & ATTN_GENERAL_ATTN_3) {
2916 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2918 }
2919 } else {
2920 if (asserted & ATTN_GENERAL_ATTN_4) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2923 }
2924 if (asserted & ATTN_GENERAL_ATTN_5) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2927 }
2928 if (asserted & ATTN_GENERAL_ATTN_6) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2931 }
2932 }
2933
2934 } /* if hardwired */
2935
5c862848
EG
2936 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2937 asserted, hc_addr);
2938 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2939
2940 /* now set back the mask */
a5e9a7cf 2941 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2942 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2943 bnx2x_release_phy_lock(bp);
2944 }
a2fbb9ea
ET
2945}
2946
fd4ef40d
EG
2947static inline void bnx2x_fan_failure(struct bnx2x *bp)
2948{
2949 int port = BP_PORT(bp);
2950
2951 /* mark the failure */
2952 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2953 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2954 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2955 bp->link_params.ext_phy_config);
2956
2957 /* log the failure */
cdaa7cb8
VZ
2958 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2959 " the driver to shutdown the card to prevent permanent"
2960 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2961}
ab6ad5a4 2962
877e9aa4 2963static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2964{
34f80b04 2965 int port = BP_PORT(bp);
877e9aa4 2966 int reg_offset;
4d295db0 2967 u32 val, swap_val, swap_override;
877e9aa4 2968
34f80b04
EG
2969 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2970 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2971
34f80b04 2972 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2973
2974 val = REG_RD(bp, reg_offset);
2975 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2976 REG_WR(bp, reg_offset, val);
2977
2978 BNX2X_ERR("SPIO5 hw attention\n");
2979
fd4ef40d 2980 /* Fan failure attention */
35b19ba5
EG
2981 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2983 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2984 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2985 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2986 /* The PHY reset is controlled by GPIO 1 */
2987 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2988 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2989 break;
2990
4d295db0
EG
2991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2992 /* The PHY reset is controlled by GPIO 1 */
2993 /* fake the port number to cancel the swap done in
2994 set_gpio() */
2995 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2996 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2997 port = (swap_val && swap_override) ^ 1;
2998 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2999 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3000 break;
3001
877e9aa4
ET
3002 default:
3003 break;
3004 }
fd4ef40d 3005 bnx2x_fan_failure(bp);
877e9aa4 3006 }
34f80b04 3007
589abe3a
EG
3008 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3009 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3010 bnx2x_acquire_phy_lock(bp);
3011 bnx2x_handle_module_detect_int(&bp->link_params);
3012 bnx2x_release_phy_lock(bp);
3013 }
3014
34f80b04
EG
3015 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3016
3017 val = REG_RD(bp, reg_offset);
3018 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3019 REG_WR(bp, reg_offset, val);
3020
3021 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3022 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3023 bnx2x_panic();
3024 }
877e9aa4
ET
3025}
3026
3027static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3028{
3029 u32 val;
3030
0626b899 3031 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3032
3033 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3034 BNX2X_ERR("DB hw attention 0x%x\n", val);
3035 /* DORQ discard attention */
3036 if (val & 0x2)
3037 BNX2X_ERR("FATAL error from DORQ\n");
3038 }
34f80b04
EG
3039
3040 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3041
3042 int port = BP_PORT(bp);
3043 int reg_offset;
3044
3045 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3046 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3047
3048 val = REG_RD(bp, reg_offset);
3049 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3050 REG_WR(bp, reg_offset, val);
3051
3052 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3053 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3054 bnx2x_panic();
3055 }
877e9aa4
ET
3056}
3057
3058static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3059{
3060 u32 val;
3061
3062 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3063
3064 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3065 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3066 /* CFC error attention */
3067 if (val & 0x2)
3068 BNX2X_ERR("FATAL error from CFC\n");
3069 }
3070
3071 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3072
3073 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3074 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3075 /* RQ_USDMDP_FIFO_OVERFLOW */
3076 if (val & 0x18000)
3077 BNX2X_ERR("FATAL error from PXP\n");
3078 }
34f80b04
EG
3079
3080 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3081
3082 int port = BP_PORT(bp);
3083 int reg_offset;
3084
3085 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3087
3088 val = REG_RD(bp, reg_offset);
3089 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090 REG_WR(bp, reg_offset, val);
3091
3092 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3093 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3094 bnx2x_panic();
3095 }
877e9aa4
ET
3096}
3097
3098static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3099{
34f80b04
EG
3100 u32 val;
3101
877e9aa4
ET
3102 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3103
34f80b04
EG
3104 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105 int func = BP_FUNC(bp);
3106
3107 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3108 bp->mf_config = SHMEM_RD(bp,
3109 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3110 val = SHMEM_RD(bp, func_mb[func].drv_status);
3111 if (val & DRV_STATUS_DCC_EVENT_MASK)
3112 bnx2x_dcc_event(bp,
3113 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3114 bnx2x__link_status_update(bp);
2691d51d 3115 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3116 bnx2x_pmf_update(bp);
3117
3118 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3119
3120 BNX2X_ERR("MC assert!\n");
3121 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3122 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3125 bnx2x_panic();
3126
3127 } else if (attn & BNX2X_MCP_ASSERT) {
3128
3129 BNX2X_ERR("MCP assert!\n");
3130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3131 bnx2x_fw_dump(bp);
877e9aa4
ET
3132
3133 } else
3134 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3135 }
3136
3137 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3138 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3139 if (attn & BNX2X_GRC_TIMEOUT) {
3140 val = CHIP_IS_E1H(bp) ?
3141 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3142 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3143 }
3144 if (attn & BNX2X_GRC_RSV) {
3145 val = CHIP_IS_E1H(bp) ?
3146 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3147 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3148 }
877e9aa4 3149 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3150 }
3151}
3152
72fd0718
VZ
3153static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3154static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3155
3156
3157#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3158#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3159#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3160#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3161#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3162#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3163/*
3164 * should be run under rtnl lock
3165 */
3166static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3167{
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171 barrier();
3172 mmiowb();
3173}
3174
3175/*
3176 * should be run under rtnl lock
3177 */
3178static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3179{
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181 val |= (1 << 16);
3182 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3183 barrier();
3184 mmiowb();
3185}
3186
3187/*
3188 * should be run under rtnl lock
3189 */
3190static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3191{
3192 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3193 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3194 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3195}
3196
3197/*
3198 * should be run under rtnl lock
3199 */
3200static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3201{
3202 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3203
3204 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3205
3206 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3207 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3208 barrier();
3209 mmiowb();
3210}
3211
3212/*
3213 * should be run under rtnl lock
3214 */
3215static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3216{
3217 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3218
3219 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3220
3221 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3222 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3223 barrier();
3224 mmiowb();
3225
3226 return val1;
3227}
3228
3229/*
3230 * should be run under rtnl lock
3231 */
3232static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3233{
3234 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3235}
3236
3237static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3238{
3239 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3240 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3241}
3242
3243static inline void _print_next_block(int idx, const char *blk)
3244{
3245 if (idx)
3246 pr_cont(", ");
3247 pr_cont("%s", blk);
3248}
3249
3250static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3251{
3252 int i = 0;
3253 u32 cur_bit = 0;
3254 for (i = 0; sig; i++) {
3255 cur_bit = ((u32)0x1 << i);
3256 if (sig & cur_bit) {
3257 switch (cur_bit) {
3258 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3259 _print_next_block(par_num++, "BRB");
3260 break;
3261 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3262 _print_next_block(par_num++, "PARSER");
3263 break;
3264 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3265 _print_next_block(par_num++, "TSDM");
3266 break;
3267 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3268 _print_next_block(par_num++, "SEARCHER");
3269 break;
3270 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3271 _print_next_block(par_num++, "TSEMI");
3272 break;
3273 }
3274
3275 /* Clear the bit */
3276 sig &= ~cur_bit;
3277 }
3278 }
3279
3280 return par_num;
3281}
3282
3283static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3284{
3285 int i = 0;
3286 u32 cur_bit = 0;
3287 for (i = 0; sig; i++) {
3288 cur_bit = ((u32)0x1 << i);
3289 if (sig & cur_bit) {
3290 switch (cur_bit) {
3291 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3292 _print_next_block(par_num++, "PBCLIENT");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3295 _print_next_block(par_num++, "QM");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3298 _print_next_block(par_num++, "XSDM");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3301 _print_next_block(par_num++, "XSEMI");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3304 _print_next_block(par_num++, "DOORBELLQ");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3307 _print_next_block(par_num++, "VAUX PCI CORE");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3310 _print_next_block(par_num++, "DEBUG");
3311 break;
3312 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3313 _print_next_block(par_num++, "USDM");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3316 _print_next_block(par_num++, "USEMI");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3319 _print_next_block(par_num++, "UPB");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3322 _print_next_block(par_num++, "CSDM");
3323 break;
3324 }
3325
3326 /* Clear the bit */
3327 sig &= ~cur_bit;
3328 }
3329 }
3330
3331 return par_num;
3332}
3333
3334static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3335{
3336 int i = 0;
3337 u32 cur_bit = 0;
3338 for (i = 0; sig; i++) {
3339 cur_bit = ((u32)0x1 << i);
3340 if (sig & cur_bit) {
3341 switch (cur_bit) {
3342 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSEMI");
3344 break;
3345 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3346 _print_next_block(par_num++, "PXP");
3347 break;
3348 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3349 _print_next_block(par_num++,
3350 "PXPPCICLOCKCLIENT");
3351 break;
3352 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3353 _print_next_block(par_num++, "CFC");
3354 break;
3355 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3356 _print_next_block(par_num++, "CDU");
3357 break;
3358 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3359 _print_next_block(par_num++, "IGU");
3360 break;
3361 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3362 _print_next_block(par_num++, "MISC");
3363 break;
3364 }
3365
3366 /* Clear the bit */
3367 sig &= ~cur_bit;
3368 }
3369 }
3370
3371 return par_num;
3372}
3373
3374static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3375{
3376 int i = 0;
3377 u32 cur_bit = 0;
3378 for (i = 0; sig; i++) {
3379 cur_bit = ((u32)0x1 << i);
3380 if (sig & cur_bit) {
3381 switch (cur_bit) {
3382 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3383 _print_next_block(par_num++, "MCP ROM");
3384 break;
3385 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3386 _print_next_block(par_num++, "MCP UMP RX");
3387 break;
3388 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3389 _print_next_block(par_num++, "MCP UMP TX");
3390 break;
3391 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3392 _print_next_block(par_num++, "MCP SCPAD");
3393 break;
3394 }
3395
3396 /* Clear the bit */
3397 sig &= ~cur_bit;
3398 }
3399 }
3400
3401 return par_num;
3402}
3403
3404static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3405 u32 sig2, u32 sig3)
3406{
3407 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3408 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3409 int par_num = 0;
3410 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3411 "[0]:0x%08x [1]:0x%08x "
3412 "[2]:0x%08x [3]:0x%08x\n",
3413 sig0 & HW_PRTY_ASSERT_SET_0,
3414 sig1 & HW_PRTY_ASSERT_SET_1,
3415 sig2 & HW_PRTY_ASSERT_SET_2,
3416 sig3 & HW_PRTY_ASSERT_SET_3);
3417 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3418 bp->dev->name);
3419 par_num = bnx2x_print_blocks_with_parity0(
3420 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3421 par_num = bnx2x_print_blocks_with_parity1(
3422 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3423 par_num = bnx2x_print_blocks_with_parity2(
3424 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3425 par_num = bnx2x_print_blocks_with_parity3(
3426 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3427 printk("\n");
3428 return true;
3429 } else
3430 return false;
3431}
3432
3433static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3434{
a2fbb9ea 3435 struct attn_route attn;
72fd0718
VZ
3436 int port = BP_PORT(bp);
3437
3438 attn.sig[0] = REG_RD(bp,
3439 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3440 port*4);
3441 attn.sig[1] = REG_RD(bp,
3442 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3443 port*4);
3444 attn.sig[2] = REG_RD(bp,
3445 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3446 port*4);
3447 attn.sig[3] = REG_RD(bp,
3448 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3449 port*4);
3450
3451 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3452 attn.sig[3]);
3453}
3454
3455static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3456{
3457 struct attn_route attn, *group_mask;
34f80b04 3458 int port = BP_PORT(bp);
877e9aa4 3459 int index;
a2fbb9ea
ET
3460 u32 reg_addr;
3461 u32 val;
3fcaf2e5 3462 u32 aeu_mask;
a2fbb9ea
ET
3463
3464 /* need to take HW lock because MCP or other port might also
3465 try to handle this event */
4a37fb66 3466 bnx2x_acquire_alr(bp);
a2fbb9ea 3467
72fd0718
VZ
3468 if (bnx2x_chk_parity_attn(bp)) {
3469 bp->recovery_state = BNX2X_RECOVERY_INIT;
3470 bnx2x_set_reset_in_progress(bp);
3471 schedule_delayed_work(&bp->reset_task, 0);
3472 /* Disable HW interrupts */
3473 bnx2x_int_disable(bp);
3474 bnx2x_release_alr(bp);
3475 /* In case of parity errors don't handle attentions so that
3476 * other function would "see" parity errors.
3477 */
3478 return;
3479 }
3480
a2fbb9ea
ET
3481 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3482 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3483 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3484 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3485 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3486 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3487
3488 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3489 if (deasserted & (1 << index)) {
72fd0718 3490 group_mask = &bp->attn_group[index];
a2fbb9ea 3491
34f80b04 3492 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
72fd0718
VZ
3493 index, group_mask->sig[0], group_mask->sig[1],
3494 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 3495
877e9aa4 3496 bnx2x_attn_int_deasserted3(bp,
72fd0718 3497 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3498 bnx2x_attn_int_deasserted1(bp,
72fd0718 3499 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3500 bnx2x_attn_int_deasserted2(bp,
72fd0718 3501 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3502 bnx2x_attn_int_deasserted0(bp,
72fd0718 3503 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3504 }
3505 }
3506
4a37fb66 3507 bnx2x_release_alr(bp);
a2fbb9ea 3508
5c862848 3509 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3510
3511 val = ~deasserted;
3fcaf2e5
EG
3512 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3513 val, reg_addr);
5c862848 3514 REG_WR(bp, reg_addr, val);
a2fbb9ea 3515
a2fbb9ea 3516 if (~bp->attn_state & deasserted)
3fcaf2e5 3517 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3518
3519 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3520 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3521
3fcaf2e5
EG
3522 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3523 aeu_mask = REG_RD(bp, reg_addr);
3524
3525 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3526 aeu_mask, deasserted);
72fd0718 3527 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3528 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3529
3fcaf2e5
EG
3530 REG_WR(bp, reg_addr, aeu_mask);
3531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3532
3533 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3534 bp->attn_state &= ~deasserted;
3535 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3536}
3537
3538static void bnx2x_attn_int(struct bnx2x *bp)
3539{
3540 /* read local copy of bits */
68d59484
EG
3541 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3542 attn_bits);
3543 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3544 attn_bits_ack);
a2fbb9ea
ET
3545 u32 attn_state = bp->attn_state;
3546
3547 /* look for changed bits */
3548 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3549 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3550
3551 DP(NETIF_MSG_HW,
3552 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3553 attn_bits, attn_ack, asserted, deasserted);
3554
3555 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3556 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3557
3558 /* handle bits that were raised */
3559 if (asserted)
3560 bnx2x_attn_int_asserted(bp, asserted);
3561
3562 if (deasserted)
3563 bnx2x_attn_int_deasserted(bp, deasserted);
3564}
3565
3566static void bnx2x_sp_task(struct work_struct *work)
3567{
1cf167f2 3568 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3569 u16 status;
3570
3571 /* Return here if interrupt is disabled */
3572 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3573 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3574 return;
3575 }
3576
3577 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3578/* if (status == 0) */
3579/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3580
cdaa7cb8 3581 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3582
877e9aa4 3583 /* HW attentions */
cdaa7cb8 3584 if (status & 0x1) {
a2fbb9ea 3585 bnx2x_attn_int(bp);
cdaa7cb8
VZ
3586 status &= ~0x1;
3587 }
3588
3589 /* CStorm events: STAT_QUERY */
3590 if (status & 0x2) {
3591 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3592 status &= ~0x2;
3593 }
3594
3595 if (unlikely(status))
3596 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3597 status);
a2fbb9ea 3598
68d59484 3599 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3600 IGU_INT_NOP, 1);
3601 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3602 IGU_INT_NOP, 1);
3603 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3604 IGU_INT_NOP, 1);
3605 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3606 IGU_INT_NOP, 1);
3607 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3608 IGU_INT_ENABLE, 1);
3609}
3610
3611static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3612{
3613 struct net_device *dev = dev_instance;
3614 struct bnx2x *bp = netdev_priv(dev);
3615
3616 /* Return here if interrupt is disabled */
3617 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3618 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3619 return IRQ_HANDLED;
3620 }
3621
8d9c5f34 3622 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3623
3624#ifdef BNX2X_STOP_ON_ERROR
3625 if (unlikely(bp->panic))
3626 return IRQ_HANDLED;
3627#endif
3628
993ac7b5
MC
3629#ifdef BCM_CNIC
3630 {
3631 struct cnic_ops *c_ops;
3632
3633 rcu_read_lock();
3634 c_ops = rcu_dereference(bp->cnic_ops);
3635 if (c_ops)
3636 c_ops->cnic_handler(bp->cnic_data, NULL);
3637 rcu_read_unlock();
3638 }
3639#endif
1cf167f2 3640 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3641
3642 return IRQ_HANDLED;
3643}
3644
3645/* end of slow path */
3646
3647/* Statistics */
3648
3649/****************************************************************************
3650* Macros
3651****************************************************************************/
3652
a2fbb9ea
ET
3653/* sum[hi:lo] += add[hi:lo] */
3654#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3655 do { \
3656 s_lo += a_lo; \
f5ba6772 3657 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3658 } while (0)
3659
3660/* difference = minuend - subtrahend */
3661#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3662 do { \
bb2a0f7a
YG
3663 if (m_lo < s_lo) { \
3664 /* underflow */ \
a2fbb9ea 3665 d_hi = m_hi - s_hi; \
bb2a0f7a 3666 if (d_hi > 0) { \
6378c025 3667 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3668 d_hi--; \
3669 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3670 } else { \
6378c025 3671 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3672 d_hi = 0; \
3673 d_lo = 0; \
3674 } \
bb2a0f7a
YG
3675 } else { \
3676 /* m_lo >= s_lo */ \
a2fbb9ea 3677 if (m_hi < s_hi) { \
bb2a0f7a
YG
3678 d_hi = 0; \
3679 d_lo = 0; \
3680 } else { \
6378c025 3681 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3682 d_hi = m_hi - s_hi; \
3683 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3684 } \
3685 } \
3686 } while (0)
3687
bb2a0f7a 3688#define UPDATE_STAT64(s, t) \
a2fbb9ea 3689 do { \
bb2a0f7a
YG
3690 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3691 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3692 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3693 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3694 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3695 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3696 } while (0)
3697
bb2a0f7a 3698#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3699 do { \
bb2a0f7a
YG
3700 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3701 diff.lo, new->s##_lo, old->s##_lo); \
3702 ADD_64(estats->t##_hi, diff.hi, \
3703 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3704 } while (0)
3705
3706/* sum[hi:lo] += add */
3707#define ADD_EXTEND_64(s_hi, s_lo, a) \
3708 do { \
3709 s_lo += a; \
3710 s_hi += (s_lo < a) ? 1 : 0; \
3711 } while (0)
3712
bb2a0f7a 3713#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3714 do { \
bb2a0f7a
YG
3715 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3716 pstats->mac_stx[1].s##_lo, \
3717 new->s); \
a2fbb9ea
ET
3718 } while (0)
3719
bb2a0f7a 3720#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3721 do { \
4781bfad
EG
3722 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3723 old_tclient->s = tclient->s; \
de832a55
EG
3724 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3725 } while (0)
3726
3727#define UPDATE_EXTEND_USTAT(s, t) \
3728 do { \
3729 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3730 old_uclient->s = uclient->s; \
3731 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3732 } while (0)
3733
3734#define UPDATE_EXTEND_XSTAT(s, t) \
3735 do { \
4781bfad
EG
3736 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3737 old_xclient->s = xclient->s; \
de832a55
EG
3738 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739 } while (0)
3740
3741/* minuend -= subtrahend */
3742#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3743 do { \
3744 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3745 } while (0)
3746
3747/* minuend[hi:lo] -= subtrahend */
3748#define SUB_EXTEND_64(m_hi, m_lo, s) \
3749 do { \
3750 SUB_64(m_hi, 0, m_lo, s); \
3751 } while (0)
3752
3753#define SUB_EXTEND_USTAT(s, t) \
3754 do { \
3755 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3756 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3757 } while (0)
3758
3759/*
3760 * General service functions
3761 */
3762
3763static inline long bnx2x_hilo(u32 *hiref)
3764{
3765 u32 lo = *(hiref + 1);
3766#if (BITS_PER_LONG == 64)
3767 u32 hi = *hiref;
3768
3769 return HILO_U64(hi, lo);
3770#else
3771 return lo;
3772#endif
3773}
3774
3775/*
3776 * Init service functions
3777 */
3778
bb2a0f7a
YG
3779static void bnx2x_storm_stats_post(struct bnx2x *bp)
3780{
3781 if (!bp->stats_pending) {
3782 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3783 int i, rc;
bb2a0f7a
YG
3784
3785 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3786 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3787 for_each_queue(bp, i)
3788 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3789
3790 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3791 ((u32 *)&ramrod_data)[1],
3792 ((u32 *)&ramrod_data)[0], 0);
3793 if (rc == 0) {
3794 /* stats ramrod has it's own slot on the spq */
3795 bp->spq_left++;
3796 bp->stats_pending = 1;
3797 }
3798 }
3799}
3800
bb2a0f7a
YG
3801static void bnx2x_hw_stats_post(struct bnx2x *bp)
3802{
3803 struct dmae_command *dmae = &bp->stats_dmae;
3804 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3805
3806 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3807 if (CHIP_REV_IS_SLOW(bp))
3808 return;
bb2a0f7a
YG
3809
3810 /* loader */
3811 if (bp->executer_idx) {
3812 int loader_idx = PMF_DMAE_C(bp);
3813
3814 memset(dmae, 0, sizeof(struct dmae_command));
3815
3816 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3817 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3818 DMAE_CMD_DST_RESET |
3819#ifdef __BIG_ENDIAN
3820 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3821#else
3822 DMAE_CMD_ENDIANITY_DW_SWAP |
3823#endif
3824 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3825 DMAE_CMD_PORT_0) |
3826 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3827 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3828 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3829 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3830 sizeof(struct dmae_command) *
3831 (loader_idx + 1)) >> 2;
3832 dmae->dst_addr_hi = 0;
3833 dmae->len = sizeof(struct dmae_command) >> 2;
3834 if (CHIP_IS_E1(bp))
3835 dmae->len--;
3836 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3837 dmae->comp_addr_hi = 0;
3838 dmae->comp_val = 1;
3839
3840 *stats_comp = 0;
3841 bnx2x_post_dmae(bp, dmae, loader_idx);
3842
3843 } else if (bp->func_stx) {
3844 *stats_comp = 0;
3845 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3846 }
3847}
3848
3849static int bnx2x_stats_comp(struct bnx2x *bp)
3850{
3851 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3852 int cnt = 10;
3853
3854 might_sleep();
3855 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3856 if (!cnt) {
3857 BNX2X_ERR("timeout waiting for stats finished\n");
3858 break;
3859 }
3860 cnt--;
12469401 3861 msleep(1);
bb2a0f7a
YG
3862 }
3863 return 1;
3864}
3865
3866/*
3867 * Statistics service functions
3868 */
3869
3870static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3871{
3872 struct dmae_command *dmae;
3873 u32 opcode;
3874 int loader_idx = PMF_DMAE_C(bp);
3875 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3876
3877 /* sanity */
3878 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3879 BNX2X_ERR("BUG!\n");
3880 return;
3881 }
3882
3883 bp->executer_idx = 0;
3884
3885 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3886 DMAE_CMD_C_ENABLE |
3887 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3888#ifdef __BIG_ENDIAN
3889 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3890#else
3891 DMAE_CMD_ENDIANITY_DW_SWAP |
3892#endif
3893 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3894 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3895
3896 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3897 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3898 dmae->src_addr_lo = bp->port.port_stx >> 2;
3899 dmae->src_addr_hi = 0;
3900 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3901 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3902 dmae->len = DMAE_LEN32_RD_MAX;
3903 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3904 dmae->comp_addr_hi = 0;
3905 dmae->comp_val = 1;
3906
3907 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3908 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3909 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3910 dmae->src_addr_hi = 0;
7a9b2557
VZ
3911 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3912 DMAE_LEN32_RD_MAX * 4);
3913 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3914 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3915 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3916 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3917 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3918 dmae->comp_val = DMAE_COMP_VAL;
3919
3920 *stats_comp = 0;
3921 bnx2x_hw_stats_post(bp);
3922 bnx2x_stats_comp(bp);
3923}
3924
3925static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3926{
3927 struct dmae_command *dmae;
34f80b04 3928 int port = BP_PORT(bp);
bb2a0f7a 3929 int vn = BP_E1HVN(bp);
a2fbb9ea 3930 u32 opcode;
bb2a0f7a 3931 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3932 u32 mac_addr;
bb2a0f7a
YG
3933 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3934
3935 /* sanity */
3936 if (!bp->link_vars.link_up || !bp->port.pmf) {
3937 BNX2X_ERR("BUG!\n");
3938 return;
3939 }
a2fbb9ea
ET
3940
3941 bp->executer_idx = 0;
bb2a0f7a
YG
3942
3943 /* MCP */
3944 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3945 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3946 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3947#ifdef __BIG_ENDIAN
bb2a0f7a 3948 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3949#else
bb2a0f7a 3950 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3951#endif
bb2a0f7a
YG
3952 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3953 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3954
bb2a0f7a 3955 if (bp->port.port_stx) {
a2fbb9ea
ET
3956
3957 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3958 dmae->opcode = opcode;
bb2a0f7a
YG
3959 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3960 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3961 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3962 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3963 dmae->len = sizeof(struct host_port_stats) >> 2;
3964 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3965 dmae->comp_addr_hi = 0;
3966 dmae->comp_val = 1;
a2fbb9ea
ET
3967 }
3968
bb2a0f7a
YG
3969 if (bp->func_stx) {
3970
3971 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3972 dmae->opcode = opcode;
3973 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3974 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3975 dmae->dst_addr_lo = bp->func_stx >> 2;
3976 dmae->dst_addr_hi = 0;
3977 dmae->len = sizeof(struct host_func_stats) >> 2;
3978 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3979 dmae->comp_addr_hi = 0;
3980 dmae->comp_val = 1;
a2fbb9ea
ET
3981 }
3982
bb2a0f7a 3983 /* MAC */
a2fbb9ea
ET
3984 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3985 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3986 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3987#ifdef __BIG_ENDIAN
3988 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3989#else
3990 DMAE_CMD_ENDIANITY_DW_SWAP |
3991#endif
bb2a0f7a
YG
3992 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3993 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3994
c18487ee 3995 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3996
3997 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3998 NIG_REG_INGRESS_BMAC0_MEM);
3999
4000 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4001 BIGMAC_REGISTER_TX_STAT_GTBYT */
4002 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4003 dmae->opcode = opcode;
4004 dmae->src_addr_lo = (mac_addr +
4005 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4006 dmae->src_addr_hi = 0;
4007 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4008 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4009 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4010 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4011 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4012 dmae->comp_addr_hi = 0;
4013 dmae->comp_val = 1;
4014
4015 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4016 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4017 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4018 dmae->opcode = opcode;
4019 dmae->src_addr_lo = (mac_addr +
4020 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4021 dmae->src_addr_hi = 0;
4022 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4023 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 4024 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4025 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
4026 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4027 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4029 dmae->comp_addr_hi = 0;
4030 dmae->comp_val = 1;
4031
c18487ee 4032 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
4033
4034 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4035
4036 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4037 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4038 dmae->opcode = opcode;
4039 dmae->src_addr_lo = (mac_addr +
4040 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4041 dmae->src_addr_hi = 0;
4042 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4043 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4044 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4045 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4046 dmae->comp_addr_hi = 0;
4047 dmae->comp_val = 1;
4048
4049 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4050 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4051 dmae->opcode = opcode;
4052 dmae->src_addr_lo = (mac_addr +
4053 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4054 dmae->src_addr_hi = 0;
4055 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4056 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 4057 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4058 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
4059 dmae->len = 1;
4060 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4061 dmae->comp_addr_hi = 0;
4062 dmae->comp_val = 1;
4063
4064 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4065 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066 dmae->opcode = opcode;
4067 dmae->src_addr_lo = (mac_addr +
4068 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4069 dmae->src_addr_hi = 0;
4070 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4071 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 4072 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4073 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
4074 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4075 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4076 dmae->comp_addr_hi = 0;
4077 dmae->comp_val = 1;
4078 }
4079
4080 /* NIG */
bb2a0f7a
YG
4081 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4082 dmae->opcode = opcode;
4083 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4084 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4085 dmae->src_addr_hi = 0;
4086 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4087 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4088 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4089 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4090 dmae->comp_addr_hi = 0;
4091 dmae->comp_val = 1;
4092
4093 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094 dmae->opcode = opcode;
4095 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4096 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4097 dmae->src_addr_hi = 0;
4098 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4099 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4100 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4101 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4102 dmae->len = (2*sizeof(u32)) >> 2;
4103 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4104 dmae->comp_addr_hi = 0;
4105 dmae->comp_val = 1;
4106
a2fbb9ea
ET
4107 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4108 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4109 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4110 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4111#ifdef __BIG_ENDIAN
4112 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4113#else
4114 DMAE_CMD_ENDIANITY_DW_SWAP |
4115#endif
bb2a0f7a
YG
4116 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4117 (vn << DMAE_CMD_E1HVN_SHIFT));
4118 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4119 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 4120 dmae->src_addr_hi = 0;
bb2a0f7a
YG
4121 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4122 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4123 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4124 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4125 dmae->len = (2*sizeof(u32)) >> 2;
4126 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4127 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4128 dmae->comp_val = DMAE_COMP_VAL;
4129
4130 *stats_comp = 0;
a2fbb9ea
ET
4131}
4132
bb2a0f7a 4133static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 4134{
bb2a0f7a
YG
4135 struct dmae_command *dmae = &bp->stats_dmae;
4136 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4137
bb2a0f7a
YG
4138 /* sanity */
4139 if (!bp->func_stx) {
4140 BNX2X_ERR("BUG!\n");
4141 return;
4142 }
a2fbb9ea 4143
bb2a0f7a
YG
4144 bp->executer_idx = 0;
4145 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 4146
bb2a0f7a
YG
4147 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4148 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4149 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4150#ifdef __BIG_ENDIAN
4151 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4152#else
4153 DMAE_CMD_ENDIANITY_DW_SWAP |
4154#endif
4155 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4156 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4157 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4158 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4159 dmae->dst_addr_lo = bp->func_stx >> 2;
4160 dmae->dst_addr_hi = 0;
4161 dmae->len = sizeof(struct host_func_stats) >> 2;
4162 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4163 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4164 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4165
bb2a0f7a
YG
4166 *stats_comp = 0;
4167}
a2fbb9ea 4168
bb2a0f7a
YG
4169static void bnx2x_stats_start(struct bnx2x *bp)
4170{
4171 if (bp->port.pmf)
4172 bnx2x_port_stats_init(bp);
4173
4174 else if (bp->func_stx)
4175 bnx2x_func_stats_init(bp);
4176
4177 bnx2x_hw_stats_post(bp);
4178 bnx2x_storm_stats_post(bp);
4179}
4180
4181static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4182{
4183 bnx2x_stats_comp(bp);
4184 bnx2x_stats_pmf_update(bp);
4185 bnx2x_stats_start(bp);
4186}
4187
4188static void bnx2x_stats_restart(struct bnx2x *bp)
4189{
4190 bnx2x_stats_comp(bp);
4191 bnx2x_stats_start(bp);
4192}
4193
4194static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4195{
4196 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4197 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4198 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
4199 struct {
4200 u32 lo;
4201 u32 hi;
4202 } diff;
bb2a0f7a
YG
4203
4204 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4205 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4206 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4207 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4208 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4209 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 4210 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 4211 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 4212 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
4213 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4214 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4215 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4216 UPDATE_STAT64(tx_stat_gt127,
4217 tx_stat_etherstatspkts65octetsto127octets);
4218 UPDATE_STAT64(tx_stat_gt255,
4219 tx_stat_etherstatspkts128octetsto255octets);
4220 UPDATE_STAT64(tx_stat_gt511,
4221 tx_stat_etherstatspkts256octetsto511octets);
4222 UPDATE_STAT64(tx_stat_gt1023,
4223 tx_stat_etherstatspkts512octetsto1023octets);
4224 UPDATE_STAT64(tx_stat_gt1518,
4225 tx_stat_etherstatspkts1024octetsto1522octets);
4226 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4227 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4228 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4229 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4230 UPDATE_STAT64(tx_stat_gterr,
4231 tx_stat_dot3statsinternalmactransmiterrors);
4232 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
4233
4234 estats->pause_frames_received_hi =
4235 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4236 estats->pause_frames_received_lo =
4237 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4238
4239 estats->pause_frames_sent_hi =
4240 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4241 estats->pause_frames_sent_lo =
4242 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
4243}
4244
4245static void bnx2x_emac_stats_update(struct bnx2x *bp)
4246{
4247 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4248 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4249 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
4250
4251 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4252 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4253 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4254 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4255 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4256 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4257 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4258 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4259 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4260 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4261 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4262 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4263 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4264 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4265 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4266 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4267 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4268 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4269 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4270 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4271 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4272 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4273 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4274 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4275 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4276 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4277 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4278 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4279 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4280 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4281 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
4282
4283 estats->pause_frames_received_hi =
4284 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4285 estats->pause_frames_received_lo =
4286 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4287 ADD_64(estats->pause_frames_received_hi,
4288 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4289 estats->pause_frames_received_lo,
4290 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4291
4292 estats->pause_frames_sent_hi =
4293 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4294 estats->pause_frames_sent_lo =
4295 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4296 ADD_64(estats->pause_frames_sent_hi,
4297 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4298 estats->pause_frames_sent_lo,
4299 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
4300}
4301
4302static int bnx2x_hw_stats_update(struct bnx2x *bp)
4303{
4304 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4305 struct nig_stats *old = &(bp->port.old_nig_stats);
4306 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4307 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
4308 struct {
4309 u32 lo;
4310 u32 hi;
4311 } diff;
bb2a0f7a
YG
4312
4313 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4314 bnx2x_bmac_stats_update(bp);
4315
4316 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4317 bnx2x_emac_stats_update(bp);
4318
4319 else { /* unreached */
c3eefaf6 4320 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
4321 return -1;
4322 }
a2fbb9ea 4323
bb2a0f7a
YG
4324 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4325 new->brb_discard - old->brb_discard);
66e855f3
YG
4326 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4327 new->brb_truncate - old->brb_truncate);
a2fbb9ea 4328
bb2a0f7a
YG
4329 UPDATE_STAT64_NIG(egress_mac_pkt0,
4330 etherstatspkts1024octetsto1522octets);
4331 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 4332
bb2a0f7a 4333 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 4334
bb2a0f7a
YG
4335 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4336 sizeof(struct mac_stx));
4337 estats->brb_drop_hi = pstats->brb_drop_hi;
4338 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 4339
bb2a0f7a 4340 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 4341
2145a920
VZ
4342 if (!BP_NOMCP(bp)) {
4343 u32 nig_timer_max =
4344 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4345 if (nig_timer_max != estats->nig_timer_max) {
4346 estats->nig_timer_max = nig_timer_max;
4347 BNX2X_ERR("NIG timer max (%u)\n",
4348 estats->nig_timer_max);
4349 }
de832a55
EG
4350 }
4351
bb2a0f7a 4352 return 0;
a2fbb9ea
ET
4353}
4354
bb2a0f7a 4355static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
4356{
4357 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4358 struct tstorm_per_port_stats *tport =
de832a55 4359 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
4360 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4361 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
4362 int i;
4363
6fe49bb9
EG
4364 memcpy(&(fstats->total_bytes_received_hi),
4365 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
de832a55
EG
4366 sizeof(struct host_func_stats) - 2*sizeof(u32));
4367 estats->error_bytes_received_hi = 0;
4368 estats->error_bytes_received_lo = 0;
4369 estats->etherstatsoverrsizepkts_hi = 0;
4370 estats->etherstatsoverrsizepkts_lo = 0;
4371 estats->no_buff_discard_hi = 0;
4372 estats->no_buff_discard_lo = 0;
a2fbb9ea 4373
54b9ddaa 4374 for_each_queue(bp, i) {
de832a55
EG
4375 struct bnx2x_fastpath *fp = &bp->fp[i];
4376 int cl_id = fp->cl_id;
4377 struct tstorm_per_client_stats *tclient =
4378 &stats->tstorm_common.client_statistics[cl_id];
4379 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4380 struct ustorm_per_client_stats *uclient =
4381 &stats->ustorm_common.client_statistics[cl_id];
4382 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4383 struct xstorm_per_client_stats *xclient =
4384 &stats->xstorm_common.client_statistics[cl_id];
4385 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4386 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4387 u32 diff;
4388
4389 /* are storm stats valid? */
4390 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4391 bp->stats_counter) {
de832a55 4392 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
cdaa7cb8 4393 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
de832a55
EG
4394 i, xclient->stats_counter, bp->stats_counter);
4395 return -1;
4396 }
4397 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4398 bp->stats_counter) {
de832a55 4399 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
cdaa7cb8 4400 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
de832a55
EG
4401 i, tclient->stats_counter, bp->stats_counter);
4402 return -2;
4403 }
4404 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4405 bp->stats_counter) {
4406 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
cdaa7cb8 4407 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
de832a55
EG
4408 i, uclient->stats_counter, bp->stats_counter);
4409 return -4;
4410 }
a2fbb9ea 4411
de832a55 4412 qstats->total_bytes_received_hi =
ca00392c 4413 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4414 qstats->total_bytes_received_lo =
ca00392c
EG
4415 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4416
4417 ADD_64(qstats->total_bytes_received_hi,
4418 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4419 qstats->total_bytes_received_lo,
4420 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4421
4422 ADD_64(qstats->total_bytes_received_hi,
4423 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4424 qstats->total_bytes_received_lo,
4425 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4426
dea7aab1
VZ
4427 SUB_64(qstats->total_bytes_received_hi,
4428 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4429 qstats->total_bytes_received_lo,
4430 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4431
4432 SUB_64(qstats->total_bytes_received_hi,
4433 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4434 qstats->total_bytes_received_lo,
4435 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4436
4437 SUB_64(qstats->total_bytes_received_hi,
4438 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4439 qstats->total_bytes_received_lo,
4440 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4441
ca00392c
EG
4442 qstats->valid_bytes_received_hi =
4443 qstats->total_bytes_received_hi;
de832a55 4444 qstats->valid_bytes_received_lo =
ca00392c 4445 qstats->total_bytes_received_lo;
bb2a0f7a 4446
de832a55 4447 qstats->error_bytes_received_hi =
bb2a0f7a 4448 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4449 qstats->error_bytes_received_lo =
bb2a0f7a 4450 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4451
de832a55
EG
4452 ADD_64(qstats->total_bytes_received_hi,
4453 qstats->error_bytes_received_hi,
4454 qstats->total_bytes_received_lo,
4455 qstats->error_bytes_received_lo);
4456
4457 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4458 total_unicast_packets_received);
4459 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4460 total_multicast_packets_received);
4461 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4462 total_broadcast_packets_received);
4463 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4464 etherstatsoverrsizepkts);
4465 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4466
4467 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4468 total_unicast_packets_received);
4469 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4470 total_multicast_packets_received);
4471 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4472 total_broadcast_packets_received);
4473 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4474 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4475 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4476
4477 qstats->total_bytes_transmitted_hi =
ca00392c 4478 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4479 qstats->total_bytes_transmitted_lo =
ca00392c
EG
4480 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4481
4482 ADD_64(qstats->total_bytes_transmitted_hi,
4483 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4484 qstats->total_bytes_transmitted_lo,
4485 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4486
4487 ADD_64(qstats->total_bytes_transmitted_hi,
4488 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4489 qstats->total_bytes_transmitted_lo,
4490 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4491
de832a55
EG
4492 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4493 total_unicast_packets_transmitted);
4494 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4495 total_multicast_packets_transmitted);
4496 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4497 total_broadcast_packets_transmitted);
4498
4499 old_tclient->checksum_discard = tclient->checksum_discard;
4500 old_tclient->ttl0_discard = tclient->ttl0_discard;
4501
4502 ADD_64(fstats->total_bytes_received_hi,
4503 qstats->total_bytes_received_hi,
4504 fstats->total_bytes_received_lo,
4505 qstats->total_bytes_received_lo);
4506 ADD_64(fstats->total_bytes_transmitted_hi,
4507 qstats->total_bytes_transmitted_hi,
4508 fstats->total_bytes_transmitted_lo,
4509 qstats->total_bytes_transmitted_lo);
4510 ADD_64(fstats->total_unicast_packets_received_hi,
4511 qstats->total_unicast_packets_received_hi,
4512 fstats->total_unicast_packets_received_lo,
4513 qstats->total_unicast_packets_received_lo);
4514 ADD_64(fstats->total_multicast_packets_received_hi,
4515 qstats->total_multicast_packets_received_hi,
4516 fstats->total_multicast_packets_received_lo,
4517 qstats->total_multicast_packets_received_lo);
4518 ADD_64(fstats->total_broadcast_packets_received_hi,
4519 qstats->total_broadcast_packets_received_hi,
4520 fstats->total_broadcast_packets_received_lo,
4521 qstats->total_broadcast_packets_received_lo);
4522 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4523 qstats->total_unicast_packets_transmitted_hi,
4524 fstats->total_unicast_packets_transmitted_lo,
4525 qstats->total_unicast_packets_transmitted_lo);
4526 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4527 qstats->total_multicast_packets_transmitted_hi,
4528 fstats->total_multicast_packets_transmitted_lo,
4529 qstats->total_multicast_packets_transmitted_lo);
4530 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4531 qstats->total_broadcast_packets_transmitted_hi,
4532 fstats->total_broadcast_packets_transmitted_lo,
4533 qstats->total_broadcast_packets_transmitted_lo);
4534 ADD_64(fstats->valid_bytes_received_hi,
4535 qstats->valid_bytes_received_hi,
4536 fstats->valid_bytes_received_lo,
4537 qstats->valid_bytes_received_lo);
4538
4539 ADD_64(estats->error_bytes_received_hi,
4540 qstats->error_bytes_received_hi,
4541 estats->error_bytes_received_lo,
4542 qstats->error_bytes_received_lo);
4543 ADD_64(estats->etherstatsoverrsizepkts_hi,
4544 qstats->etherstatsoverrsizepkts_hi,
4545 estats->etherstatsoverrsizepkts_lo,
4546 qstats->etherstatsoverrsizepkts_lo);
4547 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4548 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4549 }
4550
4551 ADD_64(fstats->total_bytes_received_hi,
4552 estats->rx_stat_ifhcinbadoctets_hi,
4553 fstats->total_bytes_received_lo,
4554 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
4555
4556 memcpy(estats, &(fstats->total_bytes_received_hi),
4557 sizeof(struct host_func_stats) - 2*sizeof(u32));
4558
de832a55
EG
4559 ADD_64(estats->etherstatsoverrsizepkts_hi,
4560 estats->rx_stat_dot3statsframestoolong_hi,
4561 estats->etherstatsoverrsizepkts_lo,
4562 estats->rx_stat_dot3statsframestoolong_lo);
4563 ADD_64(estats->error_bytes_received_hi,
4564 estats->rx_stat_ifhcinbadoctets_hi,
4565 estats->error_bytes_received_lo,
4566 estats->rx_stat_ifhcinbadoctets_lo);
4567
4568 if (bp->port.pmf) {
4569 estats->mac_filter_discard =
4570 le32_to_cpu(tport->mac_filter_discard);
4571 estats->xxoverflow_discard =
4572 le32_to_cpu(tport->xxoverflow_discard);
4573 estats->brb_truncate_discard =
bb2a0f7a 4574 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
4575 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4576 }
bb2a0f7a
YG
4577
4578 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4579
de832a55
EG
4580 bp->stats_pending = 0;
4581
a2fbb9ea
ET
4582 return 0;
4583}
4584
bb2a0f7a 4585static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4586{
bb2a0f7a 4587 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4588 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4589 int i;
a2fbb9ea
ET
4590
4591 nstats->rx_packets =
4592 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4593 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4594 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4595
4596 nstats->tx_packets =
4597 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4598 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4599 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4600
de832a55 4601 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4602
0e39e645 4603 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4604
de832a55 4605 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4606 for_each_queue(bp, i)
de832a55
EG
4607 nstats->rx_dropped +=
4608 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4609
a2fbb9ea
ET
4610 nstats->tx_dropped = 0;
4611
4612 nstats->multicast =
de832a55 4613 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4614
bb2a0f7a 4615 nstats->collisions =
de832a55 4616 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4617
4618 nstats->rx_length_errors =
de832a55
EG
4619 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4620 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4621 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4622 bnx2x_hilo(&estats->brb_truncate_hi);
4623 nstats->rx_crc_errors =
4624 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4625 nstats->rx_frame_errors =
4626 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4627 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4628 nstats->rx_missed_errors = estats->xxoverflow_discard;
4629
4630 nstats->rx_errors = nstats->rx_length_errors +
4631 nstats->rx_over_errors +
4632 nstats->rx_crc_errors +
4633 nstats->rx_frame_errors +
0e39e645
ET
4634 nstats->rx_fifo_errors +
4635 nstats->rx_missed_errors;
a2fbb9ea 4636
bb2a0f7a 4637 nstats->tx_aborted_errors =
de832a55
EG
4638 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4639 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4640 nstats->tx_carrier_errors =
4641 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4642 nstats->tx_fifo_errors = 0;
4643 nstats->tx_heartbeat_errors = 0;
4644 nstats->tx_window_errors = 0;
4645
4646 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4647 nstats->tx_carrier_errors +
4648 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4649}
4650
4651static void bnx2x_drv_stats_update(struct bnx2x *bp)
4652{
4653 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4654 int i;
4655
4656 estats->driver_xoff = 0;
4657 estats->rx_err_discard_pkt = 0;
4658 estats->rx_skb_alloc_failed = 0;
4659 estats->hw_csum_err = 0;
54b9ddaa 4660 for_each_queue(bp, i) {
de832a55
EG
4661 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4662
4663 estats->driver_xoff += qstats->driver_xoff;
4664 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4665 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4666 estats->hw_csum_err += qstats->hw_csum_err;
4667 }
a2fbb9ea
ET
4668}
4669
bb2a0f7a 4670static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4671{
bb2a0f7a 4672 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4673
bb2a0f7a
YG
4674 if (*stats_comp != DMAE_COMP_VAL)
4675 return;
4676
4677 if (bp->port.pmf)
de832a55 4678 bnx2x_hw_stats_update(bp);
a2fbb9ea 4679
de832a55
EG
4680 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4681 BNX2X_ERR("storm stats were not updated for 3 times\n");
4682 bnx2x_panic();
4683 return;
a2fbb9ea
ET
4684 }
4685
de832a55
EG
4686 bnx2x_net_stats_update(bp);
4687 bnx2x_drv_stats_update(bp);
4688
7995c64e 4689 if (netif_msg_timer(bp)) {
bb2a0f7a 4690 struct bnx2x_eth_stats *estats = &bp->eth_stats;
34f80b04 4691 int i;
a2fbb9ea 4692
dea7aab1
VZ
4693 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4694 bp->dev->name,
de832a55 4695 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea
ET
4696
4697 for_each_queue(bp, i) {
dea7aab1
VZ
4698 struct bnx2x_fastpath *fp = &bp->fp[i];
4699 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4700
4701 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4702 " rx pkt(%lu) rx calls(%lu %lu)\n",
4703 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4704 fp->rx_comp_cons),
4705 le16_to_cpu(*fp->rx_cons_sb),
4706 bnx2x_hilo(&qstats->
4707 total_unicast_packets_received_hi),
4708 fp->rx_calls, fp->rx_pkt);
4709 }
4710
4711 for_each_queue(bp, i) {
4712 struct bnx2x_fastpath *fp = &bp->fp[i];
4713 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4714 struct netdev_queue *txq =
4715 netdev_get_tx_queue(bp->dev, i);
4716
4717 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4718 " tx pkt(%lu) tx calls (%lu)"
4719 " %s (Xoff events %u)\n",
4720 fp->name, bnx2x_tx_avail(fp),
4721 le16_to_cpu(*fp->tx_cons_sb),
4722 bnx2x_hilo(&qstats->
4723 total_unicast_packets_transmitted_hi),
4724 fp->tx_pkt,
4725 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4726 qstats->driver_xoff);
a2fbb9ea
ET
4727 }
4728 }
4729
bb2a0f7a
YG
4730 bnx2x_hw_stats_post(bp);
4731 bnx2x_storm_stats_post(bp);
4732}
a2fbb9ea 4733
bb2a0f7a
YG
4734static void bnx2x_port_stats_stop(struct bnx2x *bp)
4735{
4736 struct dmae_command *dmae;
4737 u32 opcode;
4738 int loader_idx = PMF_DMAE_C(bp);
4739 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4740
bb2a0f7a 4741 bp->executer_idx = 0;
a2fbb9ea 4742
bb2a0f7a
YG
4743 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4744 DMAE_CMD_C_ENABLE |
4745 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4746#ifdef __BIG_ENDIAN
bb2a0f7a 4747 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4748#else
bb2a0f7a 4749 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4750#endif
bb2a0f7a
YG
4751 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4752 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4753
4754 if (bp->port.port_stx) {
4755
4756 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4757 if (bp->func_stx)
4758 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4759 else
4760 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4761 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4762 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4763 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4764 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4765 dmae->len = sizeof(struct host_port_stats) >> 2;
4766 if (bp->func_stx) {
4767 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4768 dmae->comp_addr_hi = 0;
4769 dmae->comp_val = 1;
4770 } else {
4771 dmae->comp_addr_lo =
4772 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4773 dmae->comp_addr_hi =
4774 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4775 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4776
bb2a0f7a
YG
4777 *stats_comp = 0;
4778 }
a2fbb9ea
ET
4779 }
4780
bb2a0f7a
YG
4781 if (bp->func_stx) {
4782
4783 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4784 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4785 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4786 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4787 dmae->dst_addr_lo = bp->func_stx >> 2;
4788 dmae->dst_addr_hi = 0;
4789 dmae->len = sizeof(struct host_func_stats) >> 2;
4790 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4791 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4792 dmae->comp_val = DMAE_COMP_VAL;
4793
4794 *stats_comp = 0;
a2fbb9ea 4795 }
bb2a0f7a
YG
4796}
4797
4798static void bnx2x_stats_stop(struct bnx2x *bp)
4799{
4800 int update = 0;
4801
4802 bnx2x_stats_comp(bp);
4803
4804 if (bp->port.pmf)
4805 update = (bnx2x_hw_stats_update(bp) == 0);
4806
4807 update |= (bnx2x_storm_stats_update(bp) == 0);
4808
4809 if (update) {
4810 bnx2x_net_stats_update(bp);
a2fbb9ea 4811
bb2a0f7a
YG
4812 if (bp->port.pmf)
4813 bnx2x_port_stats_stop(bp);
4814
4815 bnx2x_hw_stats_post(bp);
4816 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4817 }
4818}
4819
bb2a0f7a
YG
4820static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4821{
4822}
4823
4824static const struct {
4825 void (*action)(struct bnx2x *bp);
4826 enum bnx2x_stats_state next_state;
4827} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4828/* state event */
4829{
4830/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4831/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4832/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4833/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4834},
4835{
4836/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4837/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4838/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4839/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4840}
4841};
4842
4843static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4844{
4845 enum bnx2x_stats_state state = bp->stats_state;
4846
cdaa7cb8
VZ
4847 if (unlikely(bp->panic))
4848 return;
4849
bb2a0f7a
YG
4850 bnx2x_stats_stm[state][event].action(bp);
4851 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4852
8924665a
EG
4853 /* Make sure the state has been "changed" */
4854 smp_wmb();
4855
7995c64e 4856 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
bb2a0f7a
YG
4857 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4858 state, event, bp->stats_state);
4859}
4860
6fe49bb9
EG
4861static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4862{
4863 struct dmae_command *dmae;
4864 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4865
4866 /* sanity */
4867 if (!bp->port.pmf || !bp->port.port_stx) {
4868 BNX2X_ERR("BUG!\n");
4869 return;
4870 }
4871
4872 bp->executer_idx = 0;
4873
4874 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4875 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4876 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4877 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4878#ifdef __BIG_ENDIAN
4879 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4880#else
4881 DMAE_CMD_ENDIANITY_DW_SWAP |
4882#endif
4883 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4884 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4885 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4886 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4887 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4888 dmae->dst_addr_hi = 0;
4889 dmae->len = sizeof(struct host_port_stats) >> 2;
4890 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4891 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4892 dmae->comp_val = DMAE_COMP_VAL;
4893
4894 *stats_comp = 0;
4895 bnx2x_hw_stats_post(bp);
4896 bnx2x_stats_comp(bp);
4897}
4898
4899static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4900{
4901 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4902 int port = BP_PORT(bp);
4903 int func;
4904 u32 func_stx;
4905
4906 /* sanity */
4907 if (!bp->port.pmf || !bp->func_stx) {
4908 BNX2X_ERR("BUG!\n");
4909 return;
4910 }
4911
4912 /* save our func_stx */
4913 func_stx = bp->func_stx;
4914
4915 for (vn = VN_0; vn < vn_max; vn++) {
4916 func = 2*vn + port;
4917
4918 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4919 bnx2x_func_stats_init(bp);
4920 bnx2x_hw_stats_post(bp);
4921 bnx2x_stats_comp(bp);
4922 }
4923
4924 /* restore our func_stx */
4925 bp->func_stx = func_stx;
4926}
4927
4928static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4929{
4930 struct dmae_command *dmae = &bp->stats_dmae;
4931 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4932
4933 /* sanity */
4934 if (!bp->func_stx) {
4935 BNX2X_ERR("BUG!\n");
4936 return;
4937 }
4938
4939 bp->executer_idx = 0;
4940 memset(dmae, 0, sizeof(struct dmae_command));
4941
4942 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4943 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4944 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4945#ifdef __BIG_ENDIAN
4946 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4947#else
4948 DMAE_CMD_ENDIANITY_DW_SWAP |
4949#endif
4950 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4951 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4952 dmae->src_addr_lo = bp->func_stx >> 2;
4953 dmae->src_addr_hi = 0;
4954 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4955 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4956 dmae->len = sizeof(struct host_func_stats) >> 2;
4957 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4958 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4959 dmae->comp_val = DMAE_COMP_VAL;
4960
4961 *stats_comp = 0;
4962 bnx2x_hw_stats_post(bp);
4963 bnx2x_stats_comp(bp);
4964}
4965
4966static void bnx2x_stats_init(struct bnx2x *bp)
4967{
4968 int port = BP_PORT(bp);
4969 int func = BP_FUNC(bp);
4970 int i;
4971
4972 bp->stats_pending = 0;
4973 bp->executer_idx = 0;
4974 bp->stats_counter = 0;
4975
4976 /* port and func stats for management */
4977 if (!BP_NOMCP(bp)) {
4978 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4979 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4980
4981 } else {
4982 bp->port.port_stx = 0;
4983 bp->func_stx = 0;
4984 }
4985 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4986 bp->port.port_stx, bp->func_stx);
4987
4988 /* port stats */
4989 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4990 bp->port.old_nig_stats.brb_discard =
4991 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4992 bp->port.old_nig_stats.brb_truncate =
4993 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4994 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4995 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4996 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4997 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4998
4999 /* function stats */
5000 for_each_queue(bp, i) {
5001 struct bnx2x_fastpath *fp = &bp->fp[i];
5002
5003 memset(&fp->old_tclient, 0,
5004 sizeof(struct tstorm_per_client_stats));
5005 memset(&fp->old_uclient, 0,
5006 sizeof(struct ustorm_per_client_stats));
5007 memset(&fp->old_xclient, 0,
5008 sizeof(struct xstorm_per_client_stats));
5009 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5010 }
5011
5012 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5013 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5014
5015 bp->stats_state = STATS_STATE_DISABLED;
5016
5017 if (bp->port.pmf) {
5018 if (bp->port.port_stx)
5019 bnx2x_port_stats_base_init(bp);
5020
5021 if (bp->func_stx)
5022 bnx2x_func_stats_base_init(bp);
5023
5024 } else if (bp->func_stx)
5025 bnx2x_func_stats_base_update(bp);
5026}
5027
a2fbb9ea
ET
5028static void bnx2x_timer(unsigned long data)
5029{
5030 struct bnx2x *bp = (struct bnx2x *) data;
5031
5032 if (!netif_running(bp->dev))
5033 return;
5034
5035 if (atomic_read(&bp->intr_sem) != 0)
f1410647 5036 goto timer_restart;
a2fbb9ea
ET
5037
5038 if (poll) {
5039 struct bnx2x_fastpath *fp = &bp->fp[0];
5040 int rc;
5041
7961f791 5042 bnx2x_tx_int(fp);
a2fbb9ea
ET
5043 rc = bnx2x_rx_int(fp, 1000);
5044 }
5045
34f80b04
EG
5046 if (!BP_NOMCP(bp)) {
5047 int func = BP_FUNC(bp);
a2fbb9ea
ET
5048 u32 drv_pulse;
5049 u32 mcp_pulse;
5050
5051 ++bp->fw_drv_pulse_wr_seq;
5052 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5053 /* TBD - add SYSTEM_TIME */
5054 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 5055 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 5056
34f80b04 5057 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
5058 MCP_PULSE_SEQ_MASK);
5059 /* The delta between driver pulse and mcp response
5060 * should be 1 (before mcp response) or 0 (after mcp response)
5061 */
5062 if ((drv_pulse != mcp_pulse) &&
5063 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5064 /* someone lost a heartbeat... */
5065 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5066 drv_pulse, mcp_pulse);
5067 }
5068 }
5069
f34d28ea 5070 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5071 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5072
f1410647 5073timer_restart:
a2fbb9ea
ET
5074 mod_timer(&bp->timer, jiffies + bp->current_interval);
5075}
5076
5077/* end of Statistics */
5078
5079/* nic init */
5080
5081/*
5082 * nic init service functions
5083 */
5084
34f80b04 5085static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 5086{
34f80b04
EG
5087 int port = BP_PORT(bp);
5088
ca00392c
EG
5089 /* "CSTORM" */
5090 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5091 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5092 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5093 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5094 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5095 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
5096}
5097
5c862848
EG
5098static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5099 dma_addr_t mapping, int sb_id)
34f80b04
EG
5100{
5101 int port = BP_PORT(bp);
bb2a0f7a 5102 int func = BP_FUNC(bp);
a2fbb9ea 5103 int index;
34f80b04 5104 u64 section;
a2fbb9ea
ET
5105
5106 /* USTORM */
5107 section = ((u64)mapping) + offsetof(struct host_status_block,
5108 u_status_block);
34f80b04 5109 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 5110
ca00392c
EG
5111 REG_WR(bp, BAR_CSTRORM_INTMEM +
5112 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5113 REG_WR(bp, BAR_CSTRORM_INTMEM +
5114 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5115 U64_HI(section));
ca00392c
EG
5116 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5117 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
5118
5119 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
5120 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5121 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
5122
5123 /* CSTORM */
5124 section = ((u64)mapping) + offsetof(struct host_status_block,
5125 c_status_block);
34f80b04 5126 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5127
5128 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5129 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 5130 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5131 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5132 U64_HI(section));
7a9b2557 5133 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 5134 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
5135
5136 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5137 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5138 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
5139
5140 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5141}
5142
5143static void bnx2x_zero_def_sb(struct bnx2x *bp)
5144{
5145 int func = BP_FUNC(bp);
a2fbb9ea 5146
ca00392c 5147 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
5148 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5149 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
5150 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5151 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5152 sizeof(struct cstorm_def_status_block_u)/4);
5153 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5154 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5155 sizeof(struct cstorm_def_status_block_c)/4);
5156 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
5157 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5158 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
5159}
5160
5161static void bnx2x_init_def_sb(struct bnx2x *bp,
5162 struct host_def_status_block *def_sb,
34f80b04 5163 dma_addr_t mapping, int sb_id)
a2fbb9ea 5164{
34f80b04
EG
5165 int port = BP_PORT(bp);
5166 int func = BP_FUNC(bp);
a2fbb9ea
ET
5167 int index, val, reg_offset;
5168 u64 section;
5169
5170 /* ATTN */
5171 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5172 atten_status_block);
34f80b04 5173 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 5174
49d66772
ET
5175 bp->attn_state = 0;
5176
a2fbb9ea
ET
5177 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5178 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5179
34f80b04 5180 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
5181 bp->attn_group[index].sig[0] = REG_RD(bp,
5182 reg_offset + 0x10*index);
5183 bp->attn_group[index].sig[1] = REG_RD(bp,
5184 reg_offset + 0x4 + 0x10*index);
5185 bp->attn_group[index].sig[2] = REG_RD(bp,
5186 reg_offset + 0x8 + 0x10*index);
5187 bp->attn_group[index].sig[3] = REG_RD(bp,
5188 reg_offset + 0xc + 0x10*index);
5189 }
5190
a2fbb9ea
ET
5191 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5192 HC_REG_ATTN_MSG0_ADDR_L);
5193
5194 REG_WR(bp, reg_offset, U64_LO(section));
5195 REG_WR(bp, reg_offset + 4, U64_HI(section));
5196
5197 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5198
5199 val = REG_RD(bp, reg_offset);
34f80b04 5200 val |= sb_id;
a2fbb9ea
ET
5201 REG_WR(bp, reg_offset, val);
5202
5203 /* USTORM */
5204 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5205 u_def_status_block);
34f80b04 5206 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 5207
ca00392c
EG
5208 REG_WR(bp, BAR_CSTRORM_INTMEM +
5209 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5210 REG_WR(bp, BAR_CSTRORM_INTMEM +
5211 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 5212 U64_HI(section));
ca00392c
EG
5213 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5214 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
5215
5216 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
5217 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5218 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
5219
5220 /* CSTORM */
5221 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5222 c_def_status_block);
34f80b04 5223 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5224
5225 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5226 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 5227 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5228 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 5229 U64_HI(section));
5c862848 5230 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 5231 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
5232
5233 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5234 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5235 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
5236
5237 /* TSTORM */
5238 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5239 t_def_status_block);
34f80b04 5240 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5241
5242 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5243 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5244 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5245 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5246 U64_HI(section));
5c862848 5247 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 5248 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
5249
5250 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5251 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 5252 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
5253
5254 /* XSTORM */
5255 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5256 x_def_status_block);
34f80b04 5257 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5258
5259 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5260 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5261 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5262 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5263 U64_HI(section));
5c862848 5264 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 5265 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
5266
5267 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5268 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 5269 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 5270
bb2a0f7a 5271 bp->stats_pending = 0;
66e855f3 5272 bp->set_mac_pending = 0;
bb2a0f7a 5273
34f80b04 5274 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
5275}
5276
5277static void bnx2x_update_coalesce(struct bnx2x *bp)
5278{
34f80b04 5279 int port = BP_PORT(bp);
a2fbb9ea
ET
5280 int i;
5281
5282 for_each_queue(bp, i) {
34f80b04 5283 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
5284
5285 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
5286 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5287 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5288 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5289 bp->rx_ticks/(4 * BNX2X_BTR));
ca00392c
EG
5290 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5291 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5292 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5293 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
5294
5295 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5296 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
5297 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5298 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5299 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 5300 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
5301 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5302 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5303 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
5304 }
5305}
5306
7a9b2557
VZ
5307static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5308 struct bnx2x_fastpath *fp, int last)
5309{
5310 int i;
5311
5312 for (i = 0; i < last; i++) {
5313 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5314 struct sk_buff *skb = rx_buf->skb;
5315
5316 if (skb == NULL) {
5317 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5318 continue;
5319 }
5320
5321 if (fp->tpa_state[i] == BNX2X_TPA_START)
1a983142
FT
5322 dma_unmap_single(&bp->pdev->dev,
5323 dma_unmap_addr(rx_buf, mapping),
5324 bp->rx_buf_size, DMA_FROM_DEVICE);
7a9b2557
VZ
5325
5326 dev_kfree_skb(skb);
5327 rx_buf->skb = NULL;
5328 }
5329}
5330
a2fbb9ea
ET
5331static void bnx2x_init_rx_rings(struct bnx2x *bp)
5332{
7a9b2557 5333 int func = BP_FUNC(bp);
32626230
EG
5334 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5335 ETH_MAX_AGGREGATION_QUEUES_E1H;
5336 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 5337 int i, j;
a2fbb9ea 5338
87942b46 5339 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
5340 DP(NETIF_MSG_IFUP,
5341 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 5342
7a9b2557 5343 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 5344
54b9ddaa 5345 for_each_queue(bp, j) {
32626230 5346 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 5347
32626230 5348 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
5349 fp->tpa_pool[i].skb =
5350 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5351 if (!fp->tpa_pool[i].skb) {
5352 BNX2X_ERR("Failed to allocate TPA "
5353 "skb pool for queue[%d] - "
5354 "disabling TPA on this "
5355 "queue!\n", j);
5356 bnx2x_free_tpa_pool(bp, fp, i);
5357 fp->disable_tpa = 1;
5358 break;
5359 }
1a983142 5360 dma_unmap_addr_set((struct sw_rx_bd *)
7a9b2557
VZ
5361 &bp->fp->tpa_pool[i],
5362 mapping, 0);
5363 fp->tpa_state[i] = BNX2X_TPA_STOP;
5364 }
5365 }
5366 }
5367
54b9ddaa 5368 for_each_queue(bp, j) {
a2fbb9ea
ET
5369 struct bnx2x_fastpath *fp = &bp->fp[j];
5370
5371 fp->rx_bd_cons = 0;
5372 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
5373 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5374
5375 /* "next page" elements initialization */
5376 /* SGE ring */
5377 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5378 struct eth_rx_sge *sge;
5379
5380 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5381 sge->addr_hi =
5382 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5383 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5384 sge->addr_lo =
5385 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5386 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5387 }
5388
5389 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5390
7a9b2557 5391 /* RX BD ring */
a2fbb9ea
ET
5392 for (i = 1; i <= NUM_RX_RINGS; i++) {
5393 struct eth_rx_bd *rx_bd;
5394
5395 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5396 rx_bd->addr_hi =
5397 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5398 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5399 rx_bd->addr_lo =
5400 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5401 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5402 }
5403
34f80b04 5404 /* CQ ring */
a2fbb9ea
ET
5405 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5406 struct eth_rx_cqe_next_page *nextpg;
5407
5408 nextpg = (struct eth_rx_cqe_next_page *)
5409 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5410 nextpg->addr_hi =
5411 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5412 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5413 nextpg->addr_lo =
5414 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5415 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5416 }
5417
7a9b2557
VZ
5418 /* Allocate SGEs and initialize the ring elements */
5419 for (i = 0, ring_prod = 0;
5420 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5421
7a9b2557
VZ
5422 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5423 BNX2X_ERR("was only able to allocate "
5424 "%d rx sges\n", i);
5425 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5426 /* Cleanup already allocated elements */
5427 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5428 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5429 fp->disable_tpa = 1;
5430 ring_prod = 0;
5431 break;
5432 }
5433 ring_prod = NEXT_SGE_IDX(ring_prod);
5434 }
5435 fp->rx_sge_prod = ring_prod;
5436
5437 /* Allocate BDs and initialize BD ring */
66e855f3 5438 fp->rx_comp_cons = 0;
7a9b2557 5439 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5440 for (i = 0; i < bp->rx_ring_size; i++) {
5441 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5442 BNX2X_ERR("was only able to allocate "
de832a55
EG
5443 "%d rx skbs on queue[%d]\n", i, j);
5444 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5445 break;
5446 }
5447 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5448 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5449 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5450 }
5451
7a9b2557
VZ
5452 fp->rx_bd_prod = ring_prod;
5453 /* must not have more available CQEs than BDs */
cdaa7cb8
VZ
5454 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5455 cqe_ring_prod);
a2fbb9ea
ET
5456 fp->rx_pkt = fp->rx_calls = 0;
5457
7a9b2557
VZ
5458 /* Warning!
5459 * this will generate an interrupt (to the TSTORM)
5460 * must only be done after chip is initialized
5461 */
5462 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5463 fp->rx_sge_prod);
a2fbb9ea
ET
5464 if (j != 0)
5465 continue;
5466
5467 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5468 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5469 U64_LO(fp->rx_comp_mapping));
5470 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5471 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5472 U64_HI(fp->rx_comp_mapping));
5473 }
5474}
5475
5476static void bnx2x_init_tx_ring(struct bnx2x *bp)
5477{
5478 int i, j;
5479
54b9ddaa 5480 for_each_queue(bp, j) {
a2fbb9ea
ET
5481 struct bnx2x_fastpath *fp = &bp->fp[j];
5482
5483 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5484 struct eth_tx_next_bd *tx_next_bd =
5485 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5486
ca00392c 5487 tx_next_bd->addr_hi =
a2fbb9ea 5488 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5489 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5490 tx_next_bd->addr_lo =
a2fbb9ea 5491 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5492 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5493 }
5494
ca00392c
EG
5495 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5496 fp->tx_db.data.zero_fill1 = 0;
5497 fp->tx_db.data.prod = 0;
5498
a2fbb9ea
ET
5499 fp->tx_pkt_prod = 0;
5500 fp->tx_pkt_cons = 0;
5501 fp->tx_bd_prod = 0;
5502 fp->tx_bd_cons = 0;
5503 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5504 fp->tx_pkt = 0;
5505 }
5506}
5507
5508static void bnx2x_init_sp_ring(struct bnx2x *bp)
5509{
34f80b04 5510 int func = BP_FUNC(bp);
a2fbb9ea
ET
5511
5512 spin_lock_init(&bp->spq_lock);
5513
5514 bp->spq_left = MAX_SPQ_PENDING;
5515 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5516 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5517 bp->spq_prod_bd = bp->spq;
5518 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5519
34f80b04 5520 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5521 U64_LO(bp->spq_mapping));
34f80b04
EG
5522 REG_WR(bp,
5523 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5524 U64_HI(bp->spq_mapping));
5525
34f80b04 5526 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5527 bp->spq_prod_idx);
5528}
5529
5530static void bnx2x_init_context(struct bnx2x *bp)
5531{
5532 int i;
5533
54b9ddaa
VZ
5534 /* Rx */
5535 for_each_queue(bp, i) {
a2fbb9ea
ET
5536 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5537 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5538 u8 cl_id = fp->cl_id;
a2fbb9ea 5539
34f80b04
EG
5540 context->ustorm_st_context.common.sb_index_numbers =
5541 BNX2X_RX_SB_INDEX_NUM;
0626b899 5542 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5543 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5544 context->ustorm_st_context.common.flags =
de832a55
EG
5545 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5546 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5547 context->ustorm_st_context.common.statistics_counter_id =
5548 cl_id;
8d9c5f34 5549 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5550 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5551 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5552 bp->rx_buf_size;
34f80b04 5553 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5554 U64_HI(fp->rx_desc_mapping);
34f80b04 5555 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5556 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5557 if (!fp->disable_tpa) {
5558 context->ustorm_st_context.common.flags |=
ca00392c 5559 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5560 context->ustorm_st_context.common.sge_buff_size =
cdaa7cb8
VZ
5561 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5562 0xffff);
7a9b2557
VZ
5563 context->ustorm_st_context.common.sge_page_base_hi =
5564 U64_HI(fp->rx_sge_mapping);
5565 context->ustorm_st_context.common.sge_page_base_lo =
5566 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5567
5568 context->ustorm_st_context.common.max_sges_for_packet =
5569 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5570 context->ustorm_st_context.common.max_sges_for_packet =
5571 ((context->ustorm_st_context.common.
5572 max_sges_for_packet + PAGES_PER_SGE - 1) &
5573 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5574 }
5575
8d9c5f34
EG
5576 context->ustorm_ag_context.cdu_usage =
5577 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5578 CDU_REGION_NUMBER_UCM_AG,
5579 ETH_CONNECTION_TYPE);
5580
ca00392c
EG
5581 context->xstorm_ag_context.cdu_reserved =
5582 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5583 CDU_REGION_NUMBER_XCM_AG,
5584 ETH_CONNECTION_TYPE);
5585 }
5586
54b9ddaa
VZ
5587 /* Tx */
5588 for_each_queue(bp, i) {
ca00392c
EG
5589 struct bnx2x_fastpath *fp = &bp->fp[i];
5590 struct eth_context *context =
54b9ddaa 5591 bnx2x_sp(bp, context[i].eth);
ca00392c
EG
5592
5593 context->cstorm_st_context.sb_index_number =
5594 C_SB_ETH_TX_CQ_INDEX;
5595 context->cstorm_st_context.status_block_id = fp->sb_id;
5596
8d9c5f34
EG
5597 context->xstorm_st_context.tx_bd_page_base_hi =
5598 U64_HI(fp->tx_desc_mapping);
5599 context->xstorm_st_context.tx_bd_page_base_lo =
5600 U64_LO(fp->tx_desc_mapping);
ca00392c 5601 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5602 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5603 }
5604}
5605
5606static void bnx2x_init_ind_table(struct bnx2x *bp)
5607{
26c8fa4d 5608 int func = BP_FUNC(bp);
a2fbb9ea
ET
5609 int i;
5610
555f6c78 5611 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5612 return;
5613
555f6c78
EG
5614 DP(NETIF_MSG_IFUP,
5615 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5616 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5617 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5618 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 5619 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
5620}
5621
49d66772
ET
5622static void bnx2x_set_client_config(struct bnx2x *bp)
5623{
49d66772 5624 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5625 int port = BP_PORT(bp);
5626 int i;
49d66772 5627
e7799c5f 5628 tstorm_client.mtu = bp->dev->mtu;
49d66772 5629 tstorm_client.config_flags =
de832a55
EG
5630 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5631 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5632#ifdef BCM_VLAN
0c6671b0 5633 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5634 tstorm_client.config_flags |=
8d9c5f34 5635 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5636 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5637 }
5638#endif
49d66772
ET
5639
5640 for_each_queue(bp, i) {
de832a55
EG
5641 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5642
49d66772 5643 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5644 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5645 ((u32 *)&tstorm_client)[0]);
5646 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5647 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5648 ((u32 *)&tstorm_client)[1]);
5649 }
5650
34f80b04
EG
5651 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5652 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5653}
5654
a2fbb9ea
ET
5655static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5656{
a2fbb9ea 5657 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5658 int mode = bp->rx_mode;
37b091ba 5659 int mask = bp->rx_mode_cl_mask;
34f80b04 5660 int func = BP_FUNC(bp);
581ce43d 5661 int port = BP_PORT(bp);
a2fbb9ea 5662 int i;
581ce43d
EG
5663 /* All but management unicast packets should pass to the host as well */
5664 u32 llh_mask =
5665 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5666 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5667 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5668 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5669
3196a88a 5670 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5671
5672 switch (mode) {
5673 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5674 tstorm_mac_filter.ucast_drop_all = mask;
5675 tstorm_mac_filter.mcast_drop_all = mask;
5676 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5677 break;
356e2385 5678
a2fbb9ea 5679 case BNX2X_RX_MODE_NORMAL:
34f80b04 5680 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5681 break;
356e2385 5682
a2fbb9ea 5683 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5684 tstorm_mac_filter.mcast_accept_all = mask;
5685 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5686 break;
356e2385 5687
a2fbb9ea 5688 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5689 tstorm_mac_filter.ucast_accept_all = mask;
5690 tstorm_mac_filter.mcast_accept_all = mask;
5691 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5692 /* pass management unicast packets as well */
5693 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5694 break;
356e2385 5695
a2fbb9ea 5696 default:
34f80b04
EG
5697 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5698 break;
a2fbb9ea
ET
5699 }
5700
581ce43d
EG
5701 REG_WR(bp,
5702 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5703 llh_mask);
5704
a2fbb9ea
ET
5705 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5706 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5707 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5708 ((u32 *)&tstorm_mac_filter)[i]);
5709
34f80b04 5710/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5711 ((u32 *)&tstorm_mac_filter)[i]); */
5712 }
a2fbb9ea 5713
49d66772
ET
5714 if (mode != BNX2X_RX_MODE_NONE)
5715 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5716}
5717
471de716
EG
5718static void bnx2x_init_internal_common(struct bnx2x *bp)
5719{
5720 int i;
5721
5722 /* Zero this manually as its initialization is
5723 currently missing in the initTool */
5724 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5725 REG_WR(bp, BAR_USTRORM_INTMEM +
5726 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5727}
5728
5729static void bnx2x_init_internal_port(struct bnx2x *bp)
5730{
5731 int port = BP_PORT(bp);
5732
ca00392c
EG
5733 REG_WR(bp,
5734 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5735 REG_WR(bp,
5736 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5737 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5738 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5739}
5740
5741static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5742{
a2fbb9ea
ET
5743 struct tstorm_eth_function_common_config tstorm_config = {0};
5744 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5745 int port = BP_PORT(bp);
5746 int func = BP_FUNC(bp);
de832a55
EG
5747 int i, j;
5748 u32 offset;
471de716 5749 u16 max_agg_size;
a2fbb9ea
ET
5750
5751 if (is_multi(bp)) {
555f6c78 5752 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5753 tstorm_config.rss_result_mask = MULTI_MASK;
5754 }
ca00392c
EG
5755
5756 /* Enable TPA if needed */
5757 if (bp->flags & TPA_ENABLE_FLAG)
5758 tstorm_config.config_flags |=
5759 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5760
8d9c5f34
EG
5761 if (IS_E1HMF(bp))
5762 tstorm_config.config_flags |=
5763 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5764
34f80b04
EG
5765 tstorm_config.leading_client_id = BP_L_ID(bp);
5766
a2fbb9ea 5767 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5768 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5769 (*(u32 *)&tstorm_config));
5770
c14423fe 5771 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5772 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
5773 bnx2x_set_storm_rx_mode(bp);
5774
de832a55
EG
5775 for_each_queue(bp, i) {
5776 u8 cl_id = bp->fp[i].cl_id;
5777
5778 /* reset xstorm per client statistics */
5779 offset = BAR_XSTRORM_INTMEM +
5780 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5781 for (j = 0;
5782 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5783 REG_WR(bp, offset + j*4, 0);
5784
5785 /* reset tstorm per client statistics */
5786 offset = BAR_TSTRORM_INTMEM +
5787 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5788 for (j = 0;
5789 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5790 REG_WR(bp, offset + j*4, 0);
5791
5792 /* reset ustorm per client statistics */
5793 offset = BAR_USTRORM_INTMEM +
5794 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5795 for (j = 0;
5796 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5797 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5798 }
5799
5800 /* Init statistics related context */
34f80b04 5801 stats_flags.collect_eth = 1;
a2fbb9ea 5802
66e855f3 5803 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5804 ((u32 *)&stats_flags)[0]);
66e855f3 5805 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5806 ((u32 *)&stats_flags)[1]);
5807
66e855f3 5808 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5809 ((u32 *)&stats_flags)[0]);
66e855f3 5810 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5811 ((u32 *)&stats_flags)[1]);
5812
de832a55
EG
5813 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5814 ((u32 *)&stats_flags)[0]);
5815 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5816 ((u32 *)&stats_flags)[1]);
5817
66e855f3 5818 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5819 ((u32 *)&stats_flags)[0]);
66e855f3 5820 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5821 ((u32 *)&stats_flags)[1]);
5822
66e855f3
YG
5823 REG_WR(bp, BAR_XSTRORM_INTMEM +
5824 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5825 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5826 REG_WR(bp, BAR_XSTRORM_INTMEM +
5827 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5828 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5829
5830 REG_WR(bp, BAR_TSTRORM_INTMEM +
5831 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833 REG_WR(bp, BAR_TSTRORM_INTMEM +
5834 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5836
de832a55
EG
5837 REG_WR(bp, BAR_USTRORM_INTMEM +
5838 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840 REG_WR(bp, BAR_USTRORM_INTMEM +
5841 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5843
34f80b04
EG
5844 if (CHIP_IS_E1H(bp)) {
5845 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5846 IS_E1HMF(bp));
5847 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5848 IS_E1HMF(bp));
5849 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5850 IS_E1HMF(bp));
5851 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5852 IS_E1HMF(bp));
5853
7a9b2557
VZ
5854 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5855 bp->e1hov);
34f80b04
EG
5856 }
5857
4f40f2cb 5858 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
cdaa7cb8
VZ
5859 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5860 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
54b9ddaa 5861 for_each_queue(bp, i) {
7a9b2557 5862 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5863
5864 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5865 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5866 U64_LO(fp->rx_comp_mapping));
5867 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5868 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5869 U64_HI(fp->rx_comp_mapping));
5870
ca00392c
EG
5871 /* Next page */
5872 REG_WR(bp, BAR_USTRORM_INTMEM +
5873 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5874 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5875 REG_WR(bp, BAR_USTRORM_INTMEM +
5876 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5877 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5878
7a9b2557 5879 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5880 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5881 max_agg_size);
5882 }
8a1c38d1 5883
1c06328c
EG
5884 /* dropless flow control */
5885 if (CHIP_IS_E1H(bp)) {
5886 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5887
5888 rx_pause.bd_thr_low = 250;
5889 rx_pause.cqe_thr_low = 250;
5890 rx_pause.cos = 1;
5891 rx_pause.sge_thr_low = 0;
5892 rx_pause.bd_thr_high = 350;
5893 rx_pause.cqe_thr_high = 350;
5894 rx_pause.sge_thr_high = 0;
5895
54b9ddaa 5896 for_each_queue(bp, i) {
1c06328c
EG
5897 struct bnx2x_fastpath *fp = &bp->fp[i];
5898
5899 if (!fp->disable_tpa) {
5900 rx_pause.sge_thr_low = 150;
5901 rx_pause.sge_thr_high = 250;
5902 }
5903
5904
5905 offset = BAR_USTRORM_INTMEM +
5906 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5907 fp->cl_id);
5908 for (j = 0;
5909 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5910 j++)
5911 REG_WR(bp, offset + j*4,
5912 ((u32 *)&rx_pause)[j]);
5913 }
5914 }
5915
8a1c38d1
EG
5916 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5917
5918 /* Init rate shaping and fairness contexts */
5919 if (IS_E1HMF(bp)) {
5920 int vn;
5921
5922 /* During init there is no active link
5923 Until link is up, set link rate to 10Gbps */
5924 bp->link_vars.line_speed = SPEED_10000;
5925 bnx2x_init_port_minmax(bp);
5926
b015e3d1
EG
5927 if (!BP_NOMCP(bp))
5928 bp->mf_config =
5929 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
5930 bnx2x_calc_vn_weight_sum(bp);
5931
5932 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5933 bnx2x_init_vn_minmax(bp, 2*vn + port);
5934
5935 /* Enable rate shaping and fairness */
b015e3d1 5936 bp->cmng.flags.cmng_enables |=
8a1c38d1 5937 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5938
8a1c38d1
EG
5939 } else {
5940 /* rate shaping and fairness are disabled */
5941 DP(NETIF_MSG_IFUP,
5942 "single function mode minmax will be disabled\n");
5943 }
5944
5945
cdaa7cb8 5946 /* Store cmng structures to internal memory */
8a1c38d1
EG
5947 if (bp->port.pmf)
5948 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5949 REG_WR(bp, BAR_XSTRORM_INTMEM +
5950 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5951 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5952}
5953
471de716
EG
5954static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5955{
5956 switch (load_code) {
5957 case FW_MSG_CODE_DRV_LOAD_COMMON:
5958 bnx2x_init_internal_common(bp);
5959 /* no break */
5960
5961 case FW_MSG_CODE_DRV_LOAD_PORT:
5962 bnx2x_init_internal_port(bp);
5963 /* no break */
5964
5965 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5966 bnx2x_init_internal_func(bp);
5967 break;
5968
5969 default:
5970 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5971 break;
5972 }
5973}
5974
5975static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5976{
5977 int i;
5978
5979 for_each_queue(bp, i) {
5980 struct bnx2x_fastpath *fp = &bp->fp[i];
5981
34f80b04 5982 fp->bp = bp;
a2fbb9ea 5983 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5984 fp->index = i;
34f80b04 5985 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
5986#ifdef BCM_CNIC
5987 fp->sb_id = fp->cl_id + 1;
5988#else
34f80b04 5989 fp->sb_id = fp->cl_id;
37b091ba 5990#endif
34f80b04 5991 DP(NETIF_MSG_IFUP,
f5372251
EG
5992 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5993 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5994 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5995 fp->sb_id);
5c862848 5996 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5997 }
5998
16119785
EG
5999 /* ensure status block indices were read */
6000 rmb();
6001
6002
5c862848
EG
6003 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6004 DEF_SB_ID);
6005 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
6006 bnx2x_update_coalesce(bp);
6007 bnx2x_init_rx_rings(bp);
6008 bnx2x_init_tx_ring(bp);
6009 bnx2x_init_sp_ring(bp);
6010 bnx2x_init_context(bp);
471de716 6011 bnx2x_init_internal(bp, load_code);
a2fbb9ea 6012 bnx2x_init_ind_table(bp);
0ef00459
EG
6013 bnx2x_stats_init(bp);
6014
6015 /* At this point, we are ready for interrupts */
6016 atomic_set(&bp->intr_sem, 0);
6017
6018 /* flush all before enabling interrupts */
6019 mb();
6020 mmiowb();
6021
615f8fd9 6022 bnx2x_int_enable(bp);
eb8da205
EG
6023
6024 /* Check for SPIO5 */
6025 bnx2x_attn_int_deasserted0(bp,
6026 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6027 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
6028}
6029
6030/* end of nic init */
6031
6032/*
6033 * gzip service functions
6034 */
6035
6036static int bnx2x_gunzip_init(struct bnx2x *bp)
6037{
1a983142
FT
6038 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6039 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
6040 if (bp->gunzip_buf == NULL)
6041 goto gunzip_nomem1;
6042
6043 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6044 if (bp->strm == NULL)
6045 goto gunzip_nomem2;
6046
6047 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6048 GFP_KERNEL);
6049 if (bp->strm->workspace == NULL)
6050 goto gunzip_nomem3;
6051
6052 return 0;
6053
6054gunzip_nomem3:
6055 kfree(bp->strm);
6056 bp->strm = NULL;
6057
6058gunzip_nomem2:
1a983142
FT
6059 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6060 bp->gunzip_mapping);
a2fbb9ea
ET
6061 bp->gunzip_buf = NULL;
6062
6063gunzip_nomem1:
cdaa7cb8
VZ
6064 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6065 " un-compression\n");
a2fbb9ea
ET
6066 return -ENOMEM;
6067}
6068
6069static void bnx2x_gunzip_end(struct bnx2x *bp)
6070{
6071 kfree(bp->strm->workspace);
6072
6073 kfree(bp->strm);
6074 bp->strm = NULL;
6075
6076 if (bp->gunzip_buf) {
1a983142
FT
6077 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6078 bp->gunzip_mapping);
a2fbb9ea
ET
6079 bp->gunzip_buf = NULL;
6080 }
6081}
6082
94a78b79 6083static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
6084{
6085 int n, rc;
6086
6087 /* check gzip header */
94a78b79
VZ
6088 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6089 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 6090 return -EINVAL;
94a78b79 6091 }
a2fbb9ea
ET
6092
6093 n = 10;
6094
34f80b04 6095#define FNAME 0x8
a2fbb9ea
ET
6096
6097 if (zbuf[3] & FNAME)
6098 while ((zbuf[n++] != 0) && (n < len));
6099
94a78b79 6100 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
6101 bp->strm->avail_in = len - n;
6102 bp->strm->next_out = bp->gunzip_buf;
6103 bp->strm->avail_out = FW_BUF_SIZE;
6104
6105 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6106 if (rc != Z_OK)
6107 return rc;
6108
6109 rc = zlib_inflate(bp->strm, Z_FINISH);
6110 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
6111 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6112 bp->strm->msg);
a2fbb9ea
ET
6113
6114 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6115 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
6116 netdev_err(bp->dev, "Firmware decompression error:"
6117 " gunzip_outlen (%d) not aligned\n",
6118 bp->gunzip_outlen);
a2fbb9ea
ET
6119 bp->gunzip_outlen >>= 2;
6120
6121 zlib_inflateEnd(bp->strm);
6122
6123 if (rc == Z_STREAM_END)
6124 return 0;
6125
6126 return rc;
6127}
6128
6129/* nic load/unload */
6130
6131/*
34f80b04 6132 * General service functions
a2fbb9ea
ET
6133 */
6134
6135/* send a NIG loopback debug packet */
6136static void bnx2x_lb_pckt(struct bnx2x *bp)
6137{
a2fbb9ea 6138 u32 wb_write[3];
a2fbb9ea
ET
6139
6140 /* Ethernet source and destination addresses */
a2fbb9ea
ET
6141 wb_write[0] = 0x55555555;
6142 wb_write[1] = 0x55555555;
34f80b04 6143 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 6144 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6145
6146 /* NON-IP protocol */
a2fbb9ea
ET
6147 wb_write[0] = 0x09000000;
6148 wb_write[1] = 0x55555555;
34f80b04 6149 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 6150 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6151}
6152
6153/* some of the internal memories
6154 * are not directly readable from the driver
6155 * to test them we send debug packets
6156 */
6157static int bnx2x_int_mem_test(struct bnx2x *bp)
6158{
6159 int factor;
6160 int count, i;
6161 u32 val = 0;
6162
ad8d3948 6163 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 6164 factor = 120;
ad8d3948
EG
6165 else if (CHIP_REV_IS_EMUL(bp))
6166 factor = 200;
6167 else
a2fbb9ea 6168 factor = 1;
a2fbb9ea
ET
6169
6170 DP(NETIF_MSG_HW, "start part1\n");
6171
6172 /* Disable inputs of parser neighbor blocks */
6173 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6174 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6175 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6176 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6177
6178 /* Write 0 to parser credits for CFC search request */
6179 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6180
6181 /* send Ethernet packet */
6182 bnx2x_lb_pckt(bp);
6183
6184 /* TODO do i reset NIG statistic? */
6185 /* Wait until NIG register shows 1 packet of size 0x10 */
6186 count = 1000 * factor;
6187 while (count) {
34f80b04 6188
a2fbb9ea
ET
6189 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6190 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6191 if (val == 0x10)
6192 break;
6193
6194 msleep(10);
6195 count--;
6196 }
6197 if (val != 0x10) {
6198 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6199 return -1;
6200 }
6201
6202 /* Wait until PRS register shows 1 packet */
6203 count = 1000 * factor;
6204 while (count) {
6205 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
6206 if (val == 1)
6207 break;
6208
6209 msleep(10);
6210 count--;
6211 }
6212 if (val != 0x1) {
6213 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6214 return -2;
6215 }
6216
6217 /* Reset and init BRB, PRS */
34f80b04 6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 6219 msleep(50);
34f80b04 6220 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 6221 msleep(50);
94a78b79
VZ
6222 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6223 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
6224
6225 DP(NETIF_MSG_HW, "part2\n");
6226
6227 /* Disable inputs of parser neighbor blocks */
6228 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6229 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6230 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6231 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6232
6233 /* Write 0 to parser credits for CFC search request */
6234 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6235
6236 /* send 10 Ethernet packets */
6237 for (i = 0; i < 10; i++)
6238 bnx2x_lb_pckt(bp);
6239
6240 /* Wait until NIG register shows 10 + 1
6241 packets of size 11*0x10 = 0xb0 */
6242 count = 1000 * factor;
6243 while (count) {
34f80b04 6244
a2fbb9ea
ET
6245 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6246 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6247 if (val == 0xb0)
6248 break;
6249
6250 msleep(10);
6251 count--;
6252 }
6253 if (val != 0xb0) {
6254 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6255 return -3;
6256 }
6257
6258 /* Wait until PRS register shows 2 packets */
6259 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6260 if (val != 2)
6261 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6262
6263 /* Write 1 to parser credits for CFC search request */
6264 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6265
6266 /* Wait until PRS register shows 3 packets */
6267 msleep(10 * factor);
6268 /* Wait until NIG register shows 1 packet of size 0x10 */
6269 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6270 if (val != 3)
6271 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6272
6273 /* clear NIG EOP FIFO */
6274 for (i = 0; i < 11; i++)
6275 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6276 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6277 if (val != 1) {
6278 BNX2X_ERR("clear of NIG failed\n");
6279 return -4;
6280 }
6281
6282 /* Reset and init BRB, PRS, NIG */
6283 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6284 msleep(50);
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6286 msleep(50);
94a78b79
VZ
6287 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6288 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 6289#ifndef BCM_CNIC
a2fbb9ea
ET
6290 /* set NIC mode */
6291 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6292#endif
6293
6294 /* Enable inputs of parser neighbor blocks */
6295 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6296 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6297 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 6298 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
6299
6300 DP(NETIF_MSG_HW, "done\n");
6301
6302 return 0; /* OK */
6303}
6304
6305static void enable_blocks_attention(struct bnx2x *bp)
6306{
6307 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6308 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6309 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6310 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6311 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6312 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6313 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6314 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6315 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
6316/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6317/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6318 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6319 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6320 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
6321/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6322/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6323 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6324 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6325 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6326 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
6327/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6328/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6329 if (CHIP_REV_IS_FPGA(bp))
6330 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6331 else
6332 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
6333 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6334 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6335 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
6336/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6337/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6338 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6339 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
6340/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6341 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
6342}
6343
72fd0718
VZ
6344static const struct {
6345 u32 addr;
6346 u32 mask;
6347} bnx2x_parity_mask[] = {
6348 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6349 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6350 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6351 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6352 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6353 {QM_REG_QM_PRTY_MASK, 0x0},
6354 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6355 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6356 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6357 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6358 {CDU_REG_CDU_PRTY_MASK, 0x0},
6359 {CFC_REG_CFC_PRTY_MASK, 0x0},
6360 {DBG_REG_DBG_PRTY_MASK, 0x0},
6361 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6362 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6363 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6364 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6365 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6366 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6367 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6369 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6370 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6371 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6372 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6373 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6374 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6375 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6376};
6377
6378static void enable_blocks_parity(struct bnx2x *bp)
6379{
6380 int i, mask_arr_len =
6381 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6382
6383 for (i = 0; i < mask_arr_len; i++)
6384 REG_WR(bp, bnx2x_parity_mask[i].addr,
6385 bnx2x_parity_mask[i].mask);
6386}
6387
34f80b04 6388
81f75bbf
EG
6389static void bnx2x_reset_common(struct bnx2x *bp)
6390{
6391 /* reset_common */
6392 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6393 0xd3ffff7f);
6394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6395}
6396
573f2035
EG
6397static void bnx2x_init_pxp(struct bnx2x *bp)
6398{
6399 u16 devctl;
6400 int r_order, w_order;
6401
6402 pci_read_config_word(bp->pdev,
6403 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6404 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6405 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6406 if (bp->mrrs == -1)
6407 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6408 else {
6409 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6410 r_order = bp->mrrs;
6411 }
6412
6413 bnx2x_init_pxp_arb(bp, r_order, w_order);
6414}
fd4ef40d
EG
6415
6416static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6417{
2145a920 6418 int is_required;
fd4ef40d 6419 u32 val;
2145a920 6420 int port;
fd4ef40d 6421
2145a920
VZ
6422 if (BP_NOMCP(bp))
6423 return;
6424
6425 is_required = 0;
fd4ef40d
EG
6426 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6427 SHARED_HW_CFG_FAN_FAILURE_MASK;
6428
6429 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6430 is_required = 1;
6431
6432 /*
6433 * The fan failure mechanism is usually related to the PHY type since
6434 * the power consumption of the board is affected by the PHY. Currently,
6435 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6436 */
6437 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6438 for (port = PORT_0; port < PORT_MAX; port++) {
6439 u32 phy_type =
6440 SHMEM_RD(bp, dev_info.port_hw_config[port].
6441 external_phy_config) &
6442 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6443 is_required |=
6444 ((phy_type ==
6445 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
6446 (phy_type ==
6447 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
6448 (phy_type ==
6449 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6450 }
6451
6452 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6453
6454 if (is_required == 0)
6455 return;
6456
6457 /* Fan failure is indicated by SPIO 5 */
6458 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6459 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6460
6461 /* set to active low mode */
6462 val = REG_RD(bp, MISC_REG_SPIO_INT);
6463 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 6464 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
6465 REG_WR(bp, MISC_REG_SPIO_INT, val);
6466
6467 /* enable interrupt to signal the IGU */
6468 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6469 val |= (1 << MISC_REGISTERS_SPIO_5);
6470 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6471}
6472
34f80b04 6473static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6474{
a2fbb9ea 6475 u32 val, i;
37b091ba
MC
6476#ifdef BCM_CNIC
6477 u32 wb_write[2];
6478#endif
a2fbb9ea 6479
34f80b04 6480 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6481
81f75bbf 6482 bnx2x_reset_common(bp);
34f80b04
EG
6483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6484 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6485
94a78b79 6486 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6487 if (CHIP_IS_E1H(bp))
6488 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6489
34f80b04
EG
6490 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6491 msleep(30);
6492 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6493
94a78b79 6494 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6495 if (CHIP_IS_E1(bp)) {
6496 /* enable HW interrupt from PXP on USDM overflow
6497 bit 16 on INT_MASK_0 */
6498 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6499 }
a2fbb9ea 6500
94a78b79 6501 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6502 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6503
6504#ifdef __BIG_ENDIAN
34f80b04
EG
6505 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6506 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6507 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6508 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6509 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6510 /* make sure this value is 0 */
6511 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6512
6513/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6514 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6515 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6516 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6517 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6518#endif
6519
34f80b04 6520 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6521#ifdef BCM_CNIC
34f80b04
EG
6522 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6523 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6524 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6525#endif
6526
34f80b04
EG
6527 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6528 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6529
34f80b04
EG
6530 /* let the HW do it's magic ... */
6531 msleep(100);
6532 /* finish PXP init */
6533 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6534 if (val != 1) {
6535 BNX2X_ERR("PXP2 CFG failed\n");
6536 return -EBUSY;
6537 }
6538 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6539 if (val != 1) {
6540 BNX2X_ERR("PXP2 RD_INIT failed\n");
6541 return -EBUSY;
6542 }
a2fbb9ea 6543
34f80b04
EG
6544 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6545 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6546
94a78b79 6547 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6548
34f80b04
EG
6549 /* clean the DMAE memory */
6550 bp->dmae_ready = 1;
6551 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6552
94a78b79
VZ
6553 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6554 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6555 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6556 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6557
34f80b04
EG
6558 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6559 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6560 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6561 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6562
94a78b79 6563 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6564
6565#ifdef BCM_CNIC
6566 wb_write[0] = 0;
6567 wb_write[1] = 0;
6568 for (i = 0; i < 64; i++) {
6569 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6570 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6571
6572 if (CHIP_IS_E1H(bp)) {
6573 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6574 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6575 wb_write, 2);
6576 }
6577 }
6578#endif
34f80b04
EG
6579 /* soft reset pulse */
6580 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6581 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6582
37b091ba 6583#ifdef BCM_CNIC
94a78b79 6584 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6585#endif
a2fbb9ea 6586
94a78b79 6587 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6588 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6589 if (!CHIP_REV_IS_SLOW(bp)) {
6590 /* enable hw interrupt from doorbell Q */
6591 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6592 }
a2fbb9ea 6593
94a78b79
VZ
6594 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6595 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6596 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6597#ifndef BCM_CNIC
3196a88a
EG
6598 /* set NIC mode */
6599 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6600#endif
34f80b04
EG
6601 if (CHIP_IS_E1H(bp))
6602 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6603
94a78b79
VZ
6604 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6605 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6606 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6607 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6608
ca00392c
EG
6609 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6610 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6611 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6612 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6613
94a78b79
VZ
6614 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6615 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6617 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6618
34f80b04
EG
6619 /* sync semi rtc */
6620 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6621 0x80000000);
6622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6623 0x80000000);
a2fbb9ea 6624
94a78b79
VZ
6625 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6626 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6627 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6628
34f80b04
EG
6629 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6630 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6631 REG_WR(bp, i, 0xc0cac01a);
6632 /* TODO: replace with something meaningful */
6633 }
94a78b79 6634 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6635#ifdef BCM_CNIC
6636 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6637 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6638 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6639 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6640 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6641 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6642 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6643 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6644 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6645 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6646#endif
34f80b04 6647 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6648
34f80b04
EG
6649 if (sizeof(union cdu_context) != 1024)
6650 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
6651 dev_alert(&bp->pdev->dev, "please adjust the size "
6652 "of cdu_context(%ld)\n",
7995c64e 6653 (long)sizeof(union cdu_context));
a2fbb9ea 6654
94a78b79 6655 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6656 val = (4 << 24) + (0 << 12) + 1024;
6657 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6658
94a78b79 6659 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6660 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6661 /* enable context validation interrupt from CFC */
6662 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6663
6664 /* set the thresholds to prevent CFC/CDU race */
6665 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6666
94a78b79
VZ
6667 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6668 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6669
94a78b79 6670 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6671 /* Reset PCIE errors for debug */
6672 REG_WR(bp, 0x2814, 0xffffffff);
6673 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6674
94a78b79 6675 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6676 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6677 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6678 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6679
94a78b79 6680 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6681 if (CHIP_IS_E1H(bp)) {
6682 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6683 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6684 }
6685
6686 if (CHIP_REV_IS_SLOW(bp))
6687 msleep(200);
6688
6689 /* finish CFC init */
6690 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6691 if (val != 1) {
6692 BNX2X_ERR("CFC LL_INIT failed\n");
6693 return -EBUSY;
6694 }
6695 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6696 if (val != 1) {
6697 BNX2X_ERR("CFC AC_INIT failed\n");
6698 return -EBUSY;
6699 }
6700 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6701 if (val != 1) {
6702 BNX2X_ERR("CFC CAM_INIT failed\n");
6703 return -EBUSY;
6704 }
6705 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6706
34f80b04
EG
6707 /* read NIG statistic
6708 to see if this is our first up since powerup */
6709 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6710 val = *bnx2x_sp(bp, wb_data[0]);
6711
6712 /* do internal memory self test */
6713 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6714 BNX2X_ERR("internal mem self test failed\n");
6715 return -EBUSY;
6716 }
6717
35b19ba5 6718 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6719 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6720 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6723 bp->port.need_hw_lock = 1;
6724 break;
6725
34f80b04
EG
6726 default:
6727 break;
6728 }
f1410647 6729
fd4ef40d
EG
6730 bnx2x_setup_fan_failure_detection(bp);
6731
34f80b04
EG
6732 /* clear PXP2 attentions */
6733 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6734
34f80b04 6735 enable_blocks_attention(bp);
72fd0718
VZ
6736 if (CHIP_PARITY_SUPPORTED(bp))
6737 enable_blocks_parity(bp);
a2fbb9ea 6738
6bbca910
YR
6739 if (!BP_NOMCP(bp)) {
6740 bnx2x_acquire_phy_lock(bp);
6741 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6742 bnx2x_release_phy_lock(bp);
6743 } else
6744 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6745
34f80b04
EG
6746 return 0;
6747}
a2fbb9ea 6748
34f80b04
EG
6749static int bnx2x_init_port(struct bnx2x *bp)
6750{
6751 int port = BP_PORT(bp);
94a78b79 6752 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6753 u32 low, high;
34f80b04 6754 u32 val;
a2fbb9ea 6755
cdaa7cb8 6756 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
6757
6758 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6759
94a78b79 6760 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6761 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6762
6763 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6764 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6765 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6766 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6767
37b091ba
MC
6768#ifdef BCM_CNIC
6769 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6770
94a78b79 6771 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6772 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6773 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6774#endif
cdaa7cb8 6775
94a78b79 6776 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6777
94a78b79 6778 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6779 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6780 /* no pause for emulation and FPGA */
6781 low = 0;
6782 high = 513;
6783 } else {
6784 if (IS_E1HMF(bp))
6785 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6786 else if (bp->dev->mtu > 4096) {
6787 if (bp->flags & ONE_PORT_FLAG)
6788 low = 160;
6789 else {
6790 val = bp->dev->mtu;
6791 /* (24*1024 + val*4)/256 */
6792 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6793 }
6794 } else
6795 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6796 high = low + 56; /* 14*1024/256 */
6797 }
6798 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6799 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6800
6801
94a78b79 6802 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6803
94a78b79 6804 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6805 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6806 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6807 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6808
94a78b79
VZ
6809 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6810 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6811 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6812 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6813
94a78b79 6814 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6815 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6816
94a78b79 6817 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6818
6819 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6820 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6821
6822 /* update threshold */
34f80b04 6823 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6824 /* update init credit */
34f80b04 6825 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6826
6827 /* probe changes */
34f80b04 6828 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6829 msleep(5);
34f80b04 6830 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6831
37b091ba
MC
6832#ifdef BCM_CNIC
6833 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6834#endif
94a78b79 6835 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6836 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6837
6838 if (CHIP_IS_E1(bp)) {
6839 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6840 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6841 }
94a78b79 6842 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6843
94a78b79 6844 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6845 /* init aeu_mask_attn_func_0/1:
6846 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6847 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6848 * bits 4-7 are used for "per vn group attention" */
6849 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6850 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6851
94a78b79 6852 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6853 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6854 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6855 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6856 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6857
94a78b79 6858 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6859
6860 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6861
6862 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6863 /* 0x2 disable e1hov, 0x1 enable */
6864 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6865 (IS_E1HMF(bp) ? 0x1 : 0x2));
6866
1c06328c
EG
6867 {
6868 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6869 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6870 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6871 }
34f80b04
EG
6872 }
6873
94a78b79 6874 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6875 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6876
35b19ba5 6877 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6879 {
6880 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6881
6882 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6883 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6884
6885 /* The GPIO should be swapped if the swap register is
6886 set and active */
6887 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6888 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6889
6890 /* Select function upon port-swap configuration */
6891 if (port == 0) {
6892 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6893 aeu_gpio_mask = (swap_val && swap_override) ?
6894 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6895 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6896 } else {
6897 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6898 aeu_gpio_mask = (swap_val && swap_override) ?
6899 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6900 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6901 }
6902 val = REG_RD(bp, offset);
6903 /* add GPIO3 to group */
6904 val |= aeu_gpio_mask;
6905 REG_WR(bp, offset, val);
6906 }
6907 break;
6908
35b19ba5 6909 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6911 /* add SPIO 5 to group 0 */
4d295db0
EG
6912 {
6913 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6914 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6915 val = REG_RD(bp, reg_addr);
f1410647 6916 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6917 REG_WR(bp, reg_addr, val);
6918 }
f1410647
ET
6919 break;
6920
6921 default:
6922 break;
6923 }
6924
c18487ee 6925 bnx2x__link_reset(bp);
a2fbb9ea 6926
34f80b04
EG
6927 return 0;
6928}
6929
6930#define ILT_PER_FUNC (768/2)
6931#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6932/* the phys address is shifted right 12 bits and has an added
6933 1=valid bit added to the 53rd bit
6934 then since this is a wide register(TM)
6935 we split it into two 32 bit writes
6936 */
6937#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6938#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6939#define PXP_ONE_ILT(x) (((x) << 10) | x)
6940#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6941
37b091ba
MC
6942#ifdef BCM_CNIC
6943#define CNIC_ILT_LINES 127
6944#define CNIC_CTX_PER_ILT 16
6945#else
34f80b04 6946#define CNIC_ILT_LINES 0
37b091ba 6947#endif
34f80b04
EG
6948
6949static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6950{
6951 int reg;
6952
6953 if (CHIP_IS_E1H(bp))
6954 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6955 else /* E1 */
6956 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6957
6958 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6959}
6960
6961static int bnx2x_init_func(struct bnx2x *bp)
6962{
6963 int port = BP_PORT(bp);
6964 int func = BP_FUNC(bp);
8badd27a 6965 u32 addr, val;
34f80b04
EG
6966 int i;
6967
cdaa7cb8 6968 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 6969
8badd27a
EG
6970 /* set MSI reconfigure capability */
6971 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6972 val = REG_RD(bp, addr);
6973 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6974 REG_WR(bp, addr, val);
6975
34f80b04
EG
6976 i = FUNC_ILT_BASE(func);
6977
6978 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6979 if (CHIP_IS_E1H(bp)) {
6980 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6981 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6982 } else /* E1 */
6983 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6984 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6985
37b091ba
MC
6986#ifdef BCM_CNIC
6987 i += 1 + CNIC_ILT_LINES;
6988 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6989 if (CHIP_IS_E1(bp))
6990 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6991 else {
6992 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6993 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6994 }
6995
6996 i++;
6997 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6998 if (CHIP_IS_E1(bp))
6999 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7000 else {
7001 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7002 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7003 }
7004
7005 i++;
7006 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7007 if (CHIP_IS_E1(bp))
7008 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7009 else {
7010 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7011 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7012 }
7013
7014 /* tell the searcher where the T2 table is */
7015 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7016
7017 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7018 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7019
7020 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7021 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7022 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7023
7024 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7025#endif
34f80b04
EG
7026
7027 if (CHIP_IS_E1H(bp)) {
573f2035
EG
7028 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7029 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7030 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7031 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7032 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7033 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7034 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7035 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7036 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
7037
7038 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7039 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7040 }
7041
7042 /* HC init per function */
7043 if (CHIP_IS_E1H(bp)) {
7044 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7045
7046 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7047 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7048 }
94a78b79 7049 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 7050
c14423fe 7051 /* Reset PCIE errors for debug */
a2fbb9ea
ET
7052 REG_WR(bp, 0x2114, 0xffffffff);
7053 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 7054
34f80b04
EG
7055 return 0;
7056}
7057
7058static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7059{
7060 int i, rc = 0;
a2fbb9ea 7061
34f80b04
EG
7062 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7063 BP_FUNC(bp), load_code);
a2fbb9ea 7064
34f80b04
EG
7065 bp->dmae_ready = 0;
7066 mutex_init(&bp->dmae_mutex);
54016b26
EG
7067 rc = bnx2x_gunzip_init(bp);
7068 if (rc)
7069 return rc;
a2fbb9ea 7070
34f80b04
EG
7071 switch (load_code) {
7072 case FW_MSG_CODE_DRV_LOAD_COMMON:
7073 rc = bnx2x_init_common(bp);
7074 if (rc)
7075 goto init_hw_err;
7076 /* no break */
7077
7078 case FW_MSG_CODE_DRV_LOAD_PORT:
7079 bp->dmae_ready = 1;
7080 rc = bnx2x_init_port(bp);
7081 if (rc)
7082 goto init_hw_err;
7083 /* no break */
7084
7085 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7086 bp->dmae_ready = 1;
7087 rc = bnx2x_init_func(bp);
7088 if (rc)
7089 goto init_hw_err;
7090 break;
7091
7092 default:
7093 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7094 break;
7095 }
7096
7097 if (!BP_NOMCP(bp)) {
7098 int func = BP_FUNC(bp);
a2fbb9ea
ET
7099
7100 bp->fw_drv_pulse_wr_seq =
34f80b04 7101 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 7102 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
7103 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7104 }
a2fbb9ea 7105
34f80b04
EG
7106 /* this needs to be done before gunzip end */
7107 bnx2x_zero_def_sb(bp);
7108 for_each_queue(bp, i)
7109 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
7110#ifdef BCM_CNIC
7111 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7112#endif
34f80b04
EG
7113
7114init_hw_err:
7115 bnx2x_gunzip_end(bp);
7116
7117 return rc;
a2fbb9ea
ET
7118}
7119
a2fbb9ea
ET
7120static void bnx2x_free_mem(struct bnx2x *bp)
7121{
7122
7123#define BNX2X_PCI_FREE(x, y, size) \
7124 do { \
7125 if (x) { \
1a983142 7126 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
7127 x = NULL; \
7128 y = 0; \
7129 } \
7130 } while (0)
7131
7132#define BNX2X_FREE(x) \
7133 do { \
7134 if (x) { \
7135 vfree(x); \
7136 x = NULL; \
7137 } \
7138 } while (0)
7139
7140 int i;
7141
7142 /* fastpath */
555f6c78 7143 /* Common */
a2fbb9ea
ET
7144 for_each_queue(bp, i) {
7145
555f6c78 7146 /* status blocks */
a2fbb9ea
ET
7147 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7148 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7149 sizeof(struct host_status_block));
555f6c78
EG
7150 }
7151 /* Rx */
54b9ddaa 7152 for_each_queue(bp, i) {
a2fbb9ea 7153
555f6c78 7154 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7155 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7156 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7157 bnx2x_fp(bp, i, rx_desc_mapping),
7158 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7159
7160 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7161 bnx2x_fp(bp, i, rx_comp_mapping),
7162 sizeof(struct eth_fast_path_rx_cqe) *
7163 NUM_RCQ_BD);
a2fbb9ea 7164
7a9b2557 7165 /* SGE ring */
32626230 7166 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
7167 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7168 bnx2x_fp(bp, i, rx_sge_mapping),
7169 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7170 }
555f6c78 7171 /* Tx */
54b9ddaa 7172 for_each_queue(bp, i) {
555f6c78
EG
7173
7174 /* fastpath tx rings: tx_buf tx_desc */
7175 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7176 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7177 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7178 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7179 }
a2fbb9ea
ET
7180 /* end of fastpath */
7181
7182 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 7183 sizeof(struct host_def_status_block));
a2fbb9ea
ET
7184
7185 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 7186 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7187
37b091ba 7188#ifdef BCM_CNIC
a2fbb9ea
ET
7189 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7190 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7191 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7192 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
7193 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7194 sizeof(struct host_status_block));
a2fbb9ea 7195#endif
7a9b2557 7196 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
7197
7198#undef BNX2X_PCI_FREE
7199#undef BNX2X_KFREE
7200}
7201
7202static int bnx2x_alloc_mem(struct bnx2x *bp)
7203{
7204
7205#define BNX2X_PCI_ALLOC(x, y, size) \
7206 do { \
1a983142 7207 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
a2fbb9ea
ET
7208 if (x == NULL) \
7209 goto alloc_mem_err; \
7210 memset(x, 0, size); \
7211 } while (0)
7212
7213#define BNX2X_ALLOC(x, size) \
7214 do { \
7215 x = vmalloc(size); \
7216 if (x == NULL) \
7217 goto alloc_mem_err; \
7218 memset(x, 0, size); \
7219 } while (0)
7220
7221 int i;
7222
7223 /* fastpath */
555f6c78 7224 /* Common */
a2fbb9ea
ET
7225 for_each_queue(bp, i) {
7226 bnx2x_fp(bp, i, bp) = bp;
7227
555f6c78 7228 /* status blocks */
a2fbb9ea
ET
7229 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7230 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7231 sizeof(struct host_status_block));
555f6c78
EG
7232 }
7233 /* Rx */
54b9ddaa 7234 for_each_queue(bp, i) {
a2fbb9ea 7235
555f6c78 7236 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7237 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7238 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7239 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7240 &bnx2x_fp(bp, i, rx_desc_mapping),
7241 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7242
7243 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7244 &bnx2x_fp(bp, i, rx_comp_mapping),
7245 sizeof(struct eth_fast_path_rx_cqe) *
7246 NUM_RCQ_BD);
7247
7a9b2557
VZ
7248 /* SGE ring */
7249 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7250 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7251 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7252 &bnx2x_fp(bp, i, rx_sge_mapping),
7253 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 7254 }
555f6c78 7255 /* Tx */
54b9ddaa 7256 for_each_queue(bp, i) {
555f6c78 7257
555f6c78
EG
7258 /* fastpath tx rings: tx_buf tx_desc */
7259 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7260 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7261 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7262 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7263 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7264 }
a2fbb9ea
ET
7265 /* end of fastpath */
7266
7267 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7268 sizeof(struct host_def_status_block));
7269
7270 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7271 sizeof(struct bnx2x_slowpath));
7272
37b091ba 7273#ifdef BCM_CNIC
a2fbb9ea
ET
7274 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7275
a2fbb9ea
ET
7276 /* allocate searcher T2 table
7277 we allocate 1/4 of alloc num for T2
7278 (which is not entered into the ILT) */
7279 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7280
37b091ba 7281 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 7282 for (i = 0; i < 16*1024; i += 64)
37b091ba 7283 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 7284
37b091ba 7285 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
7286 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7287
7288 /* QM queues (128*MAX_CONN) */
7289 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
7290
7291 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7292 sizeof(struct host_status_block));
a2fbb9ea
ET
7293#endif
7294
7295 /* Slow path ring */
7296 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7297
7298 return 0;
7299
7300alloc_mem_err:
7301 bnx2x_free_mem(bp);
7302 return -ENOMEM;
7303
7304#undef BNX2X_PCI_ALLOC
7305#undef BNX2X_ALLOC
7306}
7307
7308static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7309{
7310 int i;
7311
54b9ddaa 7312 for_each_queue(bp, i) {
a2fbb9ea
ET
7313 struct bnx2x_fastpath *fp = &bp->fp[i];
7314
7315 u16 bd_cons = fp->tx_bd_cons;
7316 u16 sw_prod = fp->tx_pkt_prod;
7317 u16 sw_cons = fp->tx_pkt_cons;
7318
a2fbb9ea
ET
7319 while (sw_cons != sw_prod) {
7320 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7321 sw_cons++;
7322 }
7323 }
7324}
7325
7326static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7327{
7328 int i, j;
7329
54b9ddaa 7330 for_each_queue(bp, j) {
a2fbb9ea
ET
7331 struct bnx2x_fastpath *fp = &bp->fp[j];
7332
a2fbb9ea
ET
7333 for (i = 0; i < NUM_RX_BD; i++) {
7334 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7335 struct sk_buff *skb = rx_buf->skb;
7336
7337 if (skb == NULL)
7338 continue;
7339
1a983142
FT
7340 dma_unmap_single(&bp->pdev->dev,
7341 dma_unmap_addr(rx_buf, mapping),
7342 bp->rx_buf_size, DMA_FROM_DEVICE);
a2fbb9ea
ET
7343
7344 rx_buf->skb = NULL;
7345 dev_kfree_skb(skb);
7346 }
7a9b2557 7347 if (!fp->disable_tpa)
32626230
EG
7348 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7349 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 7350 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
7351 }
7352}
7353
7354static void bnx2x_free_skbs(struct bnx2x *bp)
7355{
7356 bnx2x_free_tx_skbs(bp);
7357 bnx2x_free_rx_skbs(bp);
7358}
7359
7360static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7361{
34f80b04 7362 int i, offset = 1;
a2fbb9ea
ET
7363
7364 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 7365 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
7366 bp->msix_table[0].vector);
7367
37b091ba
MC
7368#ifdef BCM_CNIC
7369 offset++;
7370#endif
a2fbb9ea 7371 for_each_queue(bp, i) {
c14423fe 7372 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 7373 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
7374 bnx2x_fp(bp, i, state));
7375
34f80b04 7376 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 7377 }
a2fbb9ea
ET
7378}
7379
6cbe5065 7380static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
a2fbb9ea 7381{
a2fbb9ea 7382 if (bp->flags & USING_MSIX_FLAG) {
6cbe5065
VZ
7383 if (!disable_only)
7384 bnx2x_free_msix_irqs(bp);
a2fbb9ea 7385 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
7386 bp->flags &= ~USING_MSIX_FLAG;
7387
8badd27a 7388 } else if (bp->flags & USING_MSI_FLAG) {
6cbe5065
VZ
7389 if (!disable_only)
7390 free_irq(bp->pdev->irq, bp->dev);
8badd27a
EG
7391 pci_disable_msi(bp->pdev);
7392 bp->flags &= ~USING_MSI_FLAG;
7393
6cbe5065 7394 } else if (!disable_only)
a2fbb9ea
ET
7395 free_irq(bp->pdev->irq, bp->dev);
7396}
7397
7398static int bnx2x_enable_msix(struct bnx2x *bp)
7399{
8badd27a
EG
7400 int i, rc, offset = 1;
7401 int igu_vec = 0;
a2fbb9ea 7402
8badd27a
EG
7403 bp->msix_table[0].entry = igu_vec;
7404 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 7405
37b091ba
MC
7406#ifdef BCM_CNIC
7407 igu_vec = BP_L_ID(bp) + offset;
7408 bp->msix_table[1].entry = igu_vec;
7409 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7410 offset++;
7411#endif
34f80b04 7412 for_each_queue(bp, i) {
8badd27a 7413 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7414 bp->msix_table[i + offset].entry = igu_vec;
7415 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7416 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7417 }
7418
34f80b04 7419 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7420 BNX2X_NUM_QUEUES(bp) + offset);
1ac218c8
VZ
7421
7422 /*
7423 * reconfigure number of tx/rx queues according to available
7424 * MSI-X vectors
7425 */
7426 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7427 /* vectors available for FP */
7428 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7429
7430 DP(NETIF_MSG_IFUP,
7431 "Trying to use less MSI-X vectors: %d\n", rc);
7432
7433 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7434
7435 if (rc) {
7436 DP(NETIF_MSG_IFUP,
7437 "MSI-X is not attainable rc %d\n", rc);
7438 return rc;
7439 }
7440
7441 bp->num_queues = min(bp->num_queues, fp_vec);
7442
7443 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7444 bp->num_queues);
7445 } else if (rc) {
8badd27a
EG
7446 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7447 return rc;
34f80b04 7448 }
8badd27a 7449
a2fbb9ea
ET
7450 bp->flags |= USING_MSIX_FLAG;
7451
7452 return 0;
a2fbb9ea
ET
7453}
7454
a2fbb9ea
ET
7455static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7456{
34f80b04 7457 int i, rc, offset = 1;
a2fbb9ea 7458
a2fbb9ea
ET
7459 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7460 bp->dev->name, bp->dev);
a2fbb9ea
ET
7461 if (rc) {
7462 BNX2X_ERR("request sp irq failed\n");
7463 return -EBUSY;
7464 }
7465
37b091ba
MC
7466#ifdef BCM_CNIC
7467 offset++;
7468#endif
a2fbb9ea 7469 for_each_queue(bp, i) {
555f6c78 7470 struct bnx2x_fastpath *fp = &bp->fp[i];
54b9ddaa
VZ
7471 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7472 bp->dev->name, i);
ca00392c 7473
34f80b04 7474 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7475 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7476 if (rc) {
555f6c78 7477 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7478 bnx2x_free_msix_irqs(bp);
7479 return -EBUSY;
7480 }
7481
555f6c78 7482 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7483 }
7484
555f6c78 7485 i = BNX2X_NUM_QUEUES(bp);
cdaa7cb8
VZ
7486 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7487 " ... fp[%d] %d\n",
7488 bp->msix_table[0].vector,
7489 0, bp->msix_table[offset].vector,
7490 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7491
a2fbb9ea 7492 return 0;
a2fbb9ea
ET
7493}
7494
8badd27a
EG
7495static int bnx2x_enable_msi(struct bnx2x *bp)
7496{
7497 int rc;
7498
7499 rc = pci_enable_msi(bp->pdev);
7500 if (rc) {
7501 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7502 return -1;
7503 }
7504 bp->flags |= USING_MSI_FLAG;
7505
7506 return 0;
7507}
7508
a2fbb9ea
ET
7509static int bnx2x_req_irq(struct bnx2x *bp)
7510{
8badd27a 7511 unsigned long flags;
34f80b04 7512 int rc;
a2fbb9ea 7513
8badd27a
EG
7514 if (bp->flags & USING_MSI_FLAG)
7515 flags = 0;
7516 else
7517 flags = IRQF_SHARED;
7518
7519 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7520 bp->dev->name, bp->dev);
a2fbb9ea
ET
7521 if (!rc)
7522 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7523
7524 return rc;
a2fbb9ea
ET
7525}
7526
65abd74d
YG
7527static void bnx2x_napi_enable(struct bnx2x *bp)
7528{
7529 int i;
7530
54b9ddaa 7531 for_each_queue(bp, i)
65abd74d
YG
7532 napi_enable(&bnx2x_fp(bp, i, napi));
7533}
7534
7535static void bnx2x_napi_disable(struct bnx2x *bp)
7536{
7537 int i;
7538
54b9ddaa 7539 for_each_queue(bp, i)
65abd74d
YG
7540 napi_disable(&bnx2x_fp(bp, i, napi));
7541}
7542
7543static void bnx2x_netif_start(struct bnx2x *bp)
7544{
e1510706
EG
7545 int intr_sem;
7546
7547 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7548 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7549
7550 if (intr_sem) {
65abd74d 7551 if (netif_running(bp->dev)) {
65abd74d
YG
7552 bnx2x_napi_enable(bp);
7553 bnx2x_int_enable(bp);
555f6c78
EG
7554 if (bp->state == BNX2X_STATE_OPEN)
7555 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7556 }
7557 }
7558}
7559
f8ef6e44 7560static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7561{
f8ef6e44 7562 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7563 bnx2x_napi_disable(bp);
762d5f6c 7564 netif_tx_disable(bp->dev);
65abd74d
YG
7565}
7566
a2fbb9ea
ET
7567/*
7568 * Init service functions
7569 */
7570
e665bfda
MC
7571/**
7572 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7573 *
7574 * @param bp driver descriptor
7575 * @param set set or clear an entry (1 or 0)
7576 * @param mac pointer to a buffer containing a MAC
7577 * @param cl_bit_vec bit vector of clients to register a MAC for
7578 * @param cam_offset offset in a CAM to use
7579 * @param with_bcast set broadcast MAC as well
7580 */
7581static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7582 u32 cl_bit_vec, u8 cam_offset,
7583 u8 with_bcast)
a2fbb9ea
ET
7584{
7585 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7586 int port = BP_PORT(bp);
a2fbb9ea
ET
7587
7588 /* CAM allocation
7589 * unicasts 0-31:port0 32-63:port1
7590 * multicast 64-127:port0 128-191:port1
7591 */
e665bfda
MC
7592 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7593 config->hdr.offset = cam_offset;
7594 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7595 config->hdr.reserved1 = 0;
7596
7597 /* primary MAC */
7598 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7599 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7600 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7601 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7602 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7603 swab16(*(u16 *)&mac[4]);
34f80b04 7604 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7605 if (set)
7606 config->config_table[0].target_table_entry.flags = 0;
7607 else
7608 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7609 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7610 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7611 config->config_table[0].target_table_entry.vlan_id = 0;
7612
3101c2bc
YG
7613 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7614 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7615 config->config_table[0].cam_entry.msb_mac_addr,
7616 config->config_table[0].cam_entry.middle_mac_addr,
7617 config->config_table[0].cam_entry.lsb_mac_addr);
7618
7619 /* broadcast */
e665bfda
MC
7620 if (with_bcast) {
7621 config->config_table[1].cam_entry.msb_mac_addr =
7622 cpu_to_le16(0xffff);
7623 config->config_table[1].cam_entry.middle_mac_addr =
7624 cpu_to_le16(0xffff);
7625 config->config_table[1].cam_entry.lsb_mac_addr =
7626 cpu_to_le16(0xffff);
7627 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7628 if (set)
7629 config->config_table[1].target_table_entry.flags =
7630 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7631 else
7632 CAM_INVALIDATE(config->config_table[1]);
7633 config->config_table[1].target_table_entry.clients_bit_vector =
7634 cpu_to_le32(cl_bit_vec);
7635 config->config_table[1].target_table_entry.vlan_id = 0;
7636 }
a2fbb9ea
ET
7637
7638 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7639 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7640 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7641}
7642
e665bfda
MC
7643/**
7644 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7645 *
7646 * @param bp driver descriptor
7647 * @param set set or clear an entry (1 or 0)
7648 * @param mac pointer to a buffer containing a MAC
7649 * @param cl_bit_vec bit vector of clients to register a MAC for
7650 * @param cam_offset offset in a CAM to use
7651 */
7652static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7653 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7654{
7655 struct mac_configuration_cmd_e1h *config =
7656 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7657
8d9c5f34 7658 config->hdr.length = 1;
e665bfda
MC
7659 config->hdr.offset = cam_offset;
7660 config->hdr.client_id = 0xff;
34f80b04
EG
7661 config->hdr.reserved1 = 0;
7662
7663 /* primary MAC */
7664 config->config_table[0].msb_mac_addr =
e665bfda 7665 swab16(*(u16 *)&mac[0]);
34f80b04 7666 config->config_table[0].middle_mac_addr =
e665bfda 7667 swab16(*(u16 *)&mac[2]);
34f80b04 7668 config->config_table[0].lsb_mac_addr =
e665bfda 7669 swab16(*(u16 *)&mac[4]);
ca00392c 7670 config->config_table[0].clients_bit_vector =
e665bfda 7671 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7672 config->config_table[0].vlan_id = 0;
7673 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7674 if (set)
7675 config->config_table[0].flags = BP_PORT(bp);
7676 else
7677 config->config_table[0].flags =
7678 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7679
e665bfda 7680 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7681 (set ? "setting" : "clearing"),
34f80b04
EG
7682 config->config_table[0].msb_mac_addr,
7683 config->config_table[0].middle_mac_addr,
e665bfda 7684 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7685
7686 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7687 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7688 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7689}
7690
a2fbb9ea
ET
7691static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7692 int *state_p, int poll)
7693{
7694 /* can take a while if any port is running */
8b3a0f0b 7695 int cnt = 5000;
a2fbb9ea 7696
c14423fe
ET
7697 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7698 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7699
7700 might_sleep();
34f80b04 7701 while (cnt--) {
a2fbb9ea
ET
7702 if (poll) {
7703 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7704 /* if index is different from 0
7705 * the reply for some commands will
3101c2bc 7706 * be on the non default queue
a2fbb9ea
ET
7707 */
7708 if (idx)
7709 bnx2x_rx_int(&bp->fp[idx], 10);
7710 }
a2fbb9ea 7711
3101c2bc 7712 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7713 if (*state_p == state) {
7714#ifdef BNX2X_STOP_ON_ERROR
7715 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7716#endif
a2fbb9ea 7717 return 0;
8b3a0f0b 7718 }
a2fbb9ea 7719
a2fbb9ea 7720 msleep(1);
e3553b29
EG
7721
7722 if (bp->panic)
7723 return -EIO;
a2fbb9ea
ET
7724 }
7725
a2fbb9ea 7726 /* timeout! */
49d66772
ET
7727 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7728 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7729#ifdef BNX2X_STOP_ON_ERROR
7730 bnx2x_panic();
7731#endif
a2fbb9ea 7732
49d66772 7733 return -EBUSY;
a2fbb9ea
ET
7734}
7735
e665bfda
MC
7736static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7737{
7738 bp->set_mac_pending++;
7739 smp_wmb();
7740
7741 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7742 (1 << bp->fp->cl_id), BP_FUNC(bp));
7743
7744 /* Wait for a completion */
7745 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7746}
7747
7748static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7749{
7750 bp->set_mac_pending++;
7751 smp_wmb();
7752
7753 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7754 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7755 1);
7756
7757 /* Wait for a completion */
7758 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7759}
7760
993ac7b5
MC
7761#ifdef BCM_CNIC
7762/**
7763 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7764 * MAC(s). This function will wait until the ramdord completion
7765 * returns.
7766 *
7767 * @param bp driver handle
7768 * @param set set or clear the CAM entry
7769 *
7770 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7771 */
7772static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7773{
7774 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7775
7776 bp->set_mac_pending++;
7777 smp_wmb();
7778
7779 /* Send a SET_MAC ramrod */
7780 if (CHIP_IS_E1(bp))
7781 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7782 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7783 1);
7784 else
7785 /* CAM allocation for E1H
7786 * unicasts: by func number
7787 * multicast: 20+FUNC*20, 20 each
7788 */
7789 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7790 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7791
7792 /* Wait for a completion when setting */
7793 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7794
7795 return 0;
7796}
7797#endif
7798
a2fbb9ea
ET
7799static int bnx2x_setup_leading(struct bnx2x *bp)
7800{
34f80b04 7801 int rc;
a2fbb9ea 7802
c14423fe 7803 /* reset IGU state */
34f80b04 7804 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7805
7806 /* SETUP ramrod */
7807 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7808
34f80b04
EG
7809 /* Wait for completion */
7810 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7811
34f80b04 7812 return rc;
a2fbb9ea
ET
7813}
7814
7815static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7816{
555f6c78
EG
7817 struct bnx2x_fastpath *fp = &bp->fp[index];
7818
a2fbb9ea 7819 /* reset IGU state */
555f6c78 7820 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7821
228241eb 7822 /* SETUP ramrod */
555f6c78
EG
7823 fp->state = BNX2X_FP_STATE_OPENING;
7824 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7825 fp->cl_id, 0);
a2fbb9ea
ET
7826
7827 /* Wait for completion */
7828 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7829 &(fp->state), 0);
a2fbb9ea
ET
7830}
7831
a2fbb9ea 7832static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7833
54b9ddaa 7834static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 7835{
ca00392c
EG
7836
7837 switch (bp->multi_mode) {
7838 case ETH_RSS_MODE_DISABLED:
54b9ddaa 7839 bp->num_queues = 1;
ca00392c
EG
7840 break;
7841
7842 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
7843 if (num_queues)
7844 bp->num_queues = min_t(u32, num_queues,
7845 BNX2X_MAX_QUEUES(bp));
ca00392c 7846 else
54b9ddaa
VZ
7847 bp->num_queues = min_t(u32, num_online_cpus(),
7848 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
7849 break;
7850
7851
7852 default:
54b9ddaa 7853 bp->num_queues = 1;
ca00392c
EG
7854 break;
7855 }
ca00392c
EG
7856}
7857
54b9ddaa 7858static int bnx2x_set_num_queues(struct bnx2x *bp)
a2fbb9ea 7859{
ca00392c 7860 int rc = 0;
a2fbb9ea 7861
8badd27a
EG
7862 switch (int_mode) {
7863 case INT_MODE_INTx:
7864 case INT_MODE_MSI:
54b9ddaa 7865 bp->num_queues = 1;
ca00392c 7866 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a 7867 break;
8badd27a 7868 default:
54b9ddaa
VZ
7869 /* Set number of queues according to bp->multi_mode value */
7870 bnx2x_set_num_queues_msix(bp);
ca00392c 7871
54b9ddaa
VZ
7872 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7873 bp->num_queues);
ca00392c 7874
2dfe0e1f
EG
7875 /* if we can't use MSI-X we only need one fp,
7876 * so try to enable MSI-X with the requested number of fp's
7877 * and fallback to MSI or legacy INTx with one fp
7878 */
ca00392c 7879 rc = bnx2x_enable_msix(bp);
54b9ddaa 7880 if (rc)
34f80b04 7881 /* failed to enable MSI-X */
54b9ddaa 7882 bp->num_queues = 1;
8badd27a 7883 break;
a2fbb9ea 7884 }
54b9ddaa 7885 bp->dev->real_num_tx_queues = bp->num_queues;
ca00392c 7886 return rc;
8badd27a
EG
7887}
7888
993ac7b5
MC
7889#ifdef BCM_CNIC
7890static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7891static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7892#endif
8badd27a
EG
7893
7894/* must be called with rtnl_lock */
7895static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7896{
7897 u32 load_code;
ca00392c
EG
7898 int i, rc;
7899
8badd27a 7900#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7901 if (unlikely(bp->panic))
7902 return -EPERM;
7903#endif
7904
7905 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7906
54b9ddaa 7907 rc = bnx2x_set_num_queues(bp);
c14423fe 7908
6cbe5065
VZ
7909 if (bnx2x_alloc_mem(bp)) {
7910 bnx2x_free_irq(bp, true);
a2fbb9ea 7911 return -ENOMEM;
6cbe5065 7912 }
a2fbb9ea 7913
54b9ddaa 7914 for_each_queue(bp, i)
7a9b2557
VZ
7915 bnx2x_fp(bp, i, disable_tpa) =
7916 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7917
54b9ddaa 7918 for_each_queue(bp, i)
2dfe0e1f
EG
7919 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7920 bnx2x_poll, 128);
7921
2dfe0e1f
EG
7922 bnx2x_napi_enable(bp);
7923
34f80b04
EG
7924 if (bp->flags & USING_MSIX_FLAG) {
7925 rc = bnx2x_req_msix_irqs(bp);
7926 if (rc) {
6cbe5065 7927 bnx2x_free_irq(bp, true);
2dfe0e1f 7928 goto load_error1;
34f80b04
EG
7929 }
7930 } else {
ca00392c 7931 /* Fall to INTx if failed to enable MSI-X due to lack of
54b9ddaa 7932 memory (in bnx2x_set_num_queues()) */
8badd27a
EG
7933 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7934 bnx2x_enable_msi(bp);
34f80b04
EG
7935 bnx2x_ack_int(bp);
7936 rc = bnx2x_req_irq(bp);
7937 if (rc) {
2dfe0e1f 7938 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6cbe5065 7939 bnx2x_free_irq(bp, true);
2dfe0e1f 7940 goto load_error1;
a2fbb9ea 7941 }
8badd27a
EG
7942 if (bp->flags & USING_MSI_FLAG) {
7943 bp->dev->irq = bp->pdev->irq;
7995c64e
JP
7944 netdev_info(bp->dev, "using MSI IRQ %d\n",
7945 bp->pdev->irq);
8badd27a 7946 }
a2fbb9ea
ET
7947 }
7948
2dfe0e1f
EG
7949 /* Send LOAD_REQUEST command to MCP
7950 Returns the type of LOAD command:
7951 if it is the first port to be initialized
7952 common blocks should be initialized, otherwise - not
7953 */
7954 if (!BP_NOMCP(bp)) {
7955 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7956 if (!load_code) {
7957 BNX2X_ERR("MCP response failure, aborting\n");
7958 rc = -EBUSY;
7959 goto load_error2;
7960 }
7961 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7962 rc = -EBUSY; /* other port in diagnostic mode */
7963 goto load_error2;
7964 }
7965
7966 } else {
7967 int port = BP_PORT(bp);
7968
f5372251 7969 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7970 load_count[0], load_count[1], load_count[2]);
7971 load_count[0]++;
7972 load_count[1 + port]++;
f5372251 7973 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7974 load_count[0], load_count[1], load_count[2]);
7975 if (load_count[0] == 1)
7976 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7977 else if (load_count[1 + port] == 1)
7978 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7979 else
7980 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7981 }
7982
7983 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7984 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7985 bp->port.pmf = 1;
7986 else
7987 bp->port.pmf = 0;
7988 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7989
a2fbb9ea 7990 /* Initialize HW */
34f80b04
EG
7991 rc = bnx2x_init_hw(bp, load_code);
7992 if (rc) {
a2fbb9ea 7993 BNX2X_ERR("HW init failed, aborting\n");
f1e1a199
VZ
7994 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7995 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7996 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
2dfe0e1f 7997 goto load_error2;
a2fbb9ea
ET
7998 }
7999
a2fbb9ea 8000 /* Setup NIC internals and enable interrupts */
471de716 8001 bnx2x_nic_init(bp, load_code);
a2fbb9ea 8002
2691d51d
EG
8003 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8004 (bp->common.shmem2_base))
8005 SHMEM2_WR(bp, dcc_support,
8006 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8007 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8008
a2fbb9ea 8009 /* Send LOAD_DONE command to MCP */
34f80b04 8010 if (!BP_NOMCP(bp)) {
228241eb
ET
8011 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8012 if (!load_code) {
da5a662a 8013 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 8014 rc = -EBUSY;
2dfe0e1f 8015 goto load_error3;
a2fbb9ea
ET
8016 }
8017 }
8018
8019 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8020
34f80b04
EG
8021 rc = bnx2x_setup_leading(bp);
8022 if (rc) {
da5a662a 8023 BNX2X_ERR("Setup leading failed!\n");
e3553b29 8024#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 8025 goto load_error3;
e3553b29
EG
8026#else
8027 bp->panic = 1;
8028 return -EBUSY;
8029#endif
34f80b04 8030 }
a2fbb9ea 8031
34f80b04
EG
8032 if (CHIP_IS_E1H(bp))
8033 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 8034 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 8035 bp->flags |= MF_FUNC_DIS;
34f80b04 8036 }
a2fbb9ea 8037
ca00392c 8038 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
8039#ifdef BCM_CNIC
8040 /* Enable Timer scan */
8041 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8042#endif
34f80b04
EG
8043 for_each_nondefault_queue(bp, i) {
8044 rc = bnx2x_setup_multi(bp, i);
8045 if (rc)
37b091ba
MC
8046#ifdef BCM_CNIC
8047 goto load_error4;
8048#else
2dfe0e1f 8049 goto load_error3;
37b091ba 8050#endif
34f80b04 8051 }
a2fbb9ea 8052
ca00392c 8053 if (CHIP_IS_E1(bp))
e665bfda 8054 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 8055 else
e665bfda 8056 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
8057#ifdef BCM_CNIC
8058 /* Set iSCSI L2 MAC */
8059 mutex_lock(&bp->cnic_mutex);
8060 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8061 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8062 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
4a6e47a4
MC
8063 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8064 CNIC_SB_ID(bp));
993ac7b5
MC
8065 }
8066 mutex_unlock(&bp->cnic_mutex);
8067#endif
ca00392c 8068 }
34f80b04
EG
8069
8070 if (bp->port.pmf)
b5bf9068 8071 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
8072
8073 /* Start fast path */
34f80b04
EG
8074 switch (load_mode) {
8075 case LOAD_NORMAL:
ca00392c
EG
8076 if (bp->state == BNX2X_STATE_OPEN) {
8077 /* Tx queue should be only reenabled */
8078 netif_tx_wake_all_queues(bp->dev);
8079 }
2dfe0e1f 8080 /* Initialize the receive filter. */
34f80b04
EG
8081 bnx2x_set_rx_mode(bp->dev);
8082 break;
8083
8084 case LOAD_OPEN:
555f6c78 8085 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
8086 if (bp->state != BNX2X_STATE_OPEN)
8087 netif_tx_disable(bp->dev);
2dfe0e1f 8088 /* Initialize the receive filter. */
34f80b04 8089 bnx2x_set_rx_mode(bp->dev);
34f80b04 8090 break;
a2fbb9ea 8091
34f80b04 8092 case LOAD_DIAG:
2dfe0e1f 8093 /* Initialize the receive filter. */
a2fbb9ea 8094 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
8095 bp->state = BNX2X_STATE_DIAG;
8096 break;
8097
8098 default:
8099 break;
a2fbb9ea
ET
8100 }
8101
34f80b04
EG
8102 if (!bp->port.pmf)
8103 bnx2x__link_status_update(bp);
8104
a2fbb9ea
ET
8105 /* start the timer */
8106 mod_timer(&bp->timer, jiffies + bp->current_interval);
8107
993ac7b5
MC
8108#ifdef BCM_CNIC
8109 bnx2x_setup_cnic_irq_info(bp);
8110 if (bp->state == BNX2X_STATE_OPEN)
8111 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8112#endif
72fd0718 8113 bnx2x_inc_load_cnt(bp);
34f80b04 8114
a2fbb9ea
ET
8115 return 0;
8116
37b091ba
MC
8117#ifdef BCM_CNIC
8118load_error4:
8119 /* Disable Timer scan */
8120 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8121#endif
2dfe0e1f
EG
8122load_error3:
8123 bnx2x_int_disable_sync(bp, 1);
8124 if (!BP_NOMCP(bp)) {
8125 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8126 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8127 }
8128 bp->port.pmf = 0;
7a9b2557
VZ
8129 /* Free SKBs, SGEs, TPA pool and driver internals */
8130 bnx2x_free_skbs(bp);
54b9ddaa 8131 for_each_queue(bp, i)
3196a88a 8132 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 8133load_error2:
d1014634 8134 /* Release IRQs */
6cbe5065 8135 bnx2x_free_irq(bp, false);
2dfe0e1f
EG
8136load_error1:
8137 bnx2x_napi_disable(bp);
54b9ddaa 8138 for_each_queue(bp, i)
7cde1c8b 8139 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8140 bnx2x_free_mem(bp);
8141
34f80b04 8142 return rc;
a2fbb9ea
ET
8143}
8144
8145static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8146{
555f6c78 8147 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
8148 int rc;
8149
c14423fe 8150 /* halt the connection */
555f6c78
EG
8151 fp->state = BNX2X_FP_STATE_HALTING;
8152 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 8153
34f80b04 8154 /* Wait for completion */
a2fbb9ea 8155 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 8156 &(fp->state), 1);
c14423fe 8157 if (rc) /* timeout */
a2fbb9ea
ET
8158 return rc;
8159
8160 /* delete cfc entry */
8161 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8162
34f80b04
EG
8163 /* Wait for completion */
8164 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 8165 &(fp->state), 1);
34f80b04 8166 return rc;
a2fbb9ea
ET
8167}
8168
da5a662a 8169static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 8170{
4781bfad 8171 __le16 dsb_sp_prod_idx;
c14423fe 8172 /* if the other port is handling traffic,
a2fbb9ea 8173 this can take a lot of time */
34f80b04
EG
8174 int cnt = 500;
8175 int rc;
a2fbb9ea
ET
8176
8177 might_sleep();
8178
8179 /* Send HALT ramrod */
8180 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 8181 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 8182
34f80b04
EG
8183 /* Wait for completion */
8184 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8185 &(bp->fp[0].state), 1);
8186 if (rc) /* timeout */
da5a662a 8187 return rc;
a2fbb9ea 8188
49d66772 8189 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 8190
228241eb 8191 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
8192 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8193
49d66772 8194 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
8195 we are going to reset the chip anyway
8196 so there is not much to do if this times out
8197 */
34f80b04 8198 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
8199 if (!cnt) {
8200 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8201 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8202 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8203#ifdef BNX2X_STOP_ON_ERROR
8204 bnx2x_panic();
8205#endif
36e552ab 8206 rc = -EBUSY;
34f80b04
EG
8207 break;
8208 }
8209 cnt--;
da5a662a 8210 msleep(1);
5650d9d4 8211 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
8212 }
8213 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8214 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
8215
8216 return rc;
a2fbb9ea
ET
8217}
8218
34f80b04
EG
8219static void bnx2x_reset_func(struct bnx2x *bp)
8220{
8221 int port = BP_PORT(bp);
8222 int func = BP_FUNC(bp);
8223 int base, i;
8224
8225 /* Configure IGU */
8226 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8227 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8228
37b091ba
MC
8229#ifdef BCM_CNIC
8230 /* Disable Timer scan */
8231 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8232 /*
8233 * Wait for at least 10ms and up to 2 second for the timers scan to
8234 * complete
8235 */
8236 for (i = 0; i < 200; i++) {
8237 msleep(10);
8238 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8239 break;
8240 }
8241#endif
34f80b04
EG
8242 /* Clear ILT */
8243 base = FUNC_ILT_BASE(func);
8244 for (i = base; i < base + ILT_PER_FUNC; i++)
8245 bnx2x_ilt_wr(bp, i, 0);
8246}
8247
8248static void bnx2x_reset_port(struct bnx2x *bp)
8249{
8250 int port = BP_PORT(bp);
8251 u32 val;
8252
8253 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8254
8255 /* Do not rcv packets to BRB */
8256 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8257 /* Do not direct rcv packets that are not for MCP to the BRB */
8258 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8259 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8260
8261 /* Configure AEU */
8262 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8263
8264 msleep(100);
8265 /* Check for BRB port occupancy */
8266 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8267 if (val)
8268 DP(NETIF_MSG_IFDOWN,
33471629 8269 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
8270
8271 /* TODO: Close Doorbell port? */
8272}
8273
34f80b04
EG
8274static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8275{
8276 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8277 BP_FUNC(bp), reset_code);
8278
8279 switch (reset_code) {
8280 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8281 bnx2x_reset_port(bp);
8282 bnx2x_reset_func(bp);
8283 bnx2x_reset_common(bp);
8284 break;
8285
8286 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8287 bnx2x_reset_port(bp);
8288 bnx2x_reset_func(bp);
8289 break;
8290
8291 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8292 bnx2x_reset_func(bp);
8293 break;
49d66772 8294
34f80b04
EG
8295 default:
8296 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8297 break;
8298 }
8299}
8300
72fd0718 8301static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 8302{
da5a662a 8303 int port = BP_PORT(bp);
a2fbb9ea 8304 u32 reset_code = 0;
da5a662a 8305 int i, cnt, rc;
a2fbb9ea 8306
555f6c78 8307 /* Wait until tx fastpath tasks complete */
54b9ddaa 8308 for_each_queue(bp, i) {
228241eb
ET
8309 struct bnx2x_fastpath *fp = &bp->fp[i];
8310
34f80b04 8311 cnt = 1000;
e8b5fc51 8312 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 8313
7961f791 8314 bnx2x_tx_int(fp);
34f80b04
EG
8315 if (!cnt) {
8316 BNX2X_ERR("timeout waiting for queue[%d]\n",
8317 i);
8318#ifdef BNX2X_STOP_ON_ERROR
8319 bnx2x_panic();
8320 return -EBUSY;
8321#else
8322 break;
8323#endif
8324 }
8325 cnt--;
da5a662a 8326 msleep(1);
34f80b04 8327 }
228241eb 8328 }
da5a662a
VZ
8329 /* Give HW time to discard old tx messages */
8330 msleep(1);
a2fbb9ea 8331
3101c2bc
YG
8332 if (CHIP_IS_E1(bp)) {
8333 struct mac_configuration_cmd *config =
8334 bnx2x_sp(bp, mcast_config);
8335
e665bfda 8336 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 8337
8d9c5f34 8338 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
8339 CAM_INVALIDATE(config->config_table[i]);
8340
8d9c5f34 8341 config->hdr.length = i;
3101c2bc
YG
8342 if (CHIP_REV_IS_SLOW(bp))
8343 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8344 else
8345 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 8346 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
8347 config->hdr.reserved1 = 0;
8348
e665bfda
MC
8349 bp->set_mac_pending++;
8350 smp_wmb();
8351
3101c2bc
YG
8352 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8353 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8354 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8355
8356 } else { /* E1H */
65abd74d
YG
8357 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8358
e665bfda 8359 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
8360
8361 for (i = 0; i < MC_HASH_SIZE; i++)
8362 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
8363
8364 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 8365 }
993ac7b5
MC
8366#ifdef BCM_CNIC
8367 /* Clear iSCSI L2 MAC */
8368 mutex_lock(&bp->cnic_mutex);
8369 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8370 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8371 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8372 }
8373 mutex_unlock(&bp->cnic_mutex);
8374#endif
3101c2bc 8375
65abd74d
YG
8376 if (unload_mode == UNLOAD_NORMAL)
8377 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8378
7d0446c2 8379 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8380 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8381
7d0446c2 8382 else if (bp->wol) {
65abd74d
YG
8383 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8384 u8 *mac_addr = bp->dev->dev_addr;
8385 u32 val;
8386 /* The mac address is written to entries 1-4 to
8387 preserve entry 0 which is used by the PMF */
8388 u8 entry = (BP_E1HVN(bp) + 1)*8;
8389
8390 val = (mac_addr[0] << 8) | mac_addr[1];
8391 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8392
8393 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8394 (mac_addr[4] << 8) | mac_addr[5];
8395 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8396
8397 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8398
8399 } else
8400 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8401
34f80b04
EG
8402 /* Close multi and leading connections
8403 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
8404 for_each_nondefault_queue(bp, i)
8405 if (bnx2x_stop_multi(bp, i))
228241eb 8406 goto unload_error;
a2fbb9ea 8407
da5a662a
VZ
8408 rc = bnx2x_stop_leading(bp);
8409 if (rc) {
34f80b04 8410 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8411#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8412 return -EBUSY;
da5a662a
VZ
8413#else
8414 goto unload_error;
34f80b04 8415#endif
228241eb
ET
8416 }
8417
8418unload_error:
34f80b04 8419 if (!BP_NOMCP(bp))
228241eb 8420 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8421 else {
f5372251 8422 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8423 load_count[0], load_count[1], load_count[2]);
8424 load_count[0]--;
da5a662a 8425 load_count[1 + port]--;
f5372251 8426 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8427 load_count[0], load_count[1], load_count[2]);
8428 if (load_count[0] == 0)
8429 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8430 else if (load_count[1 + port] == 0)
34f80b04
EG
8431 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8432 else
8433 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8434 }
a2fbb9ea 8435
34f80b04
EG
8436 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8437 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8438 bnx2x__link_reset(bp);
a2fbb9ea
ET
8439
8440 /* Reset the chip */
228241eb 8441 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8442
8443 /* Report UNLOAD_DONE to MCP */
34f80b04 8444 if (!BP_NOMCP(bp))
a2fbb9ea 8445 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8446
72fd0718
VZ
8447}
8448
8449static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8450{
8451 u32 val;
8452
8453 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8454
8455 if (CHIP_IS_E1(bp)) {
8456 int port = BP_PORT(bp);
8457 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8458 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8459
8460 val = REG_RD(bp, addr);
8461 val &= ~(0x300);
8462 REG_WR(bp, addr, val);
8463 } else if (CHIP_IS_E1H(bp)) {
8464 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8465 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8466 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8467 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8468 }
8469}
8470
8471/* must be called with rtnl_lock */
8472static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8473{
8474 int i;
8475
8476 if (bp->state == BNX2X_STATE_CLOSED) {
8477 /* Interface has been removed - nothing to recover */
8478 bp->recovery_state = BNX2X_RECOVERY_DONE;
8479 bp->is_leader = 0;
8480 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8481 smp_wmb();
8482
8483 return -EINVAL;
8484 }
8485
8486#ifdef BCM_CNIC
8487 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8488#endif
8489 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8490
8491 /* Set "drop all" */
8492 bp->rx_mode = BNX2X_RX_MODE_NONE;
8493 bnx2x_set_storm_rx_mode(bp);
8494
8495 /* Disable HW interrupts, NAPI and Tx */
8496 bnx2x_netif_stop(bp, 1);
8497
8498 del_timer_sync(&bp->timer);
8499 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8500 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8501 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8502
8503 /* Release IRQs */
8504 bnx2x_free_irq(bp, false);
8505
8506 /* Cleanup the chip if needed */
8507 if (unload_mode != UNLOAD_RECOVERY)
8508 bnx2x_chip_cleanup(bp, unload_mode);
8509
9a035440 8510 bp->port.pmf = 0;
a2fbb9ea 8511
7a9b2557 8512 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8513 bnx2x_free_skbs(bp);
54b9ddaa 8514 for_each_queue(bp, i)
3196a88a 8515 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8516 for_each_queue(bp, i)
7cde1c8b 8517 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8518 bnx2x_free_mem(bp);
8519
8520 bp->state = BNX2X_STATE_CLOSED;
228241eb 8521
a2fbb9ea
ET
8522 netif_carrier_off(bp->dev);
8523
72fd0718
VZ
8524 /* The last driver must disable a "close the gate" if there is no
8525 * parity attention or "process kill" pending.
8526 */
8527 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8528 bnx2x_reset_is_done(bp))
8529 bnx2x_disable_close_the_gate(bp);
8530
8531 /* Reset MCP mail box sequence if there is on going recovery */
8532 if (unload_mode == UNLOAD_RECOVERY)
8533 bp->fw_seq = 0;
8534
8535 return 0;
8536}
8537
8538/* Close gates #2, #3 and #4: */
8539static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8540{
8541 u32 val, addr;
8542
8543 /* Gates #2 and #4a are closed/opened for "not E1" only */
8544 if (!CHIP_IS_E1(bp)) {
8545 /* #4 */
8546 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8547 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8548 close ? (val | 0x1) : (val & (~(u32)1)));
8549 /* #2 */
8550 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8551 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8552 close ? (val | 0x1) : (val & (~(u32)1)));
8553 }
8554
8555 /* #3 */
8556 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8557 val = REG_RD(bp, addr);
8558 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8559
8560 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8561 close ? "closing" : "opening");
8562 mmiowb();
8563}
8564
8565#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8566
8567static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8568{
8569 /* Do some magic... */
8570 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8571 *magic_val = val & SHARED_MF_CLP_MAGIC;
8572 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8573}
8574
8575/* Restore the value of the `magic' bit.
8576 *
8577 * @param pdev Device handle.
8578 * @param magic_val Old value of the `magic' bit.
8579 */
8580static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8581{
8582 /* Restore the `magic' bit value... */
8583 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8584 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8585 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8586 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8587 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8588 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8589}
8590
8591/* Prepares for MCP reset: takes care of CLP configurations.
8592 *
8593 * @param bp
8594 * @param magic_val Old value of 'magic' bit.
8595 */
8596static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8597{
8598 u32 shmem;
8599 u32 validity_offset;
8600
8601 DP(NETIF_MSG_HW, "Starting\n");
8602
8603 /* Set `magic' bit in order to save MF config */
8604 if (!CHIP_IS_E1(bp))
8605 bnx2x_clp_reset_prep(bp, magic_val);
8606
8607 /* Get shmem offset */
8608 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8609 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8610
8611 /* Clear validity map flags */
8612 if (shmem > 0)
8613 REG_WR(bp, shmem + validity_offset, 0);
8614}
8615
8616#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8617#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8618
8619/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8620 * depending on the HW type.
8621 *
8622 * @param bp
8623 */
8624static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8625{
8626 /* special handling for emulation and FPGA,
8627 wait 10 times longer */
8628 if (CHIP_REV_IS_SLOW(bp))
8629 msleep(MCP_ONE_TIMEOUT*10);
8630 else
8631 msleep(MCP_ONE_TIMEOUT);
8632}
8633
8634static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8635{
8636 u32 shmem, cnt, validity_offset, val;
8637 int rc = 0;
8638
8639 msleep(100);
8640
8641 /* Get shmem offset */
8642 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8643 if (shmem == 0) {
8644 BNX2X_ERR("Shmem 0 return failure\n");
8645 rc = -ENOTTY;
8646 goto exit_lbl;
8647 }
8648
8649 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8650
8651 /* Wait for MCP to come up */
8652 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8653 /* TBD: its best to check validity map of last port.
8654 * currently checks on port 0.
8655 */
8656 val = REG_RD(bp, shmem + validity_offset);
8657 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8658 shmem + validity_offset, val);
8659
8660 /* check that shared memory is valid. */
8661 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8662 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8663 break;
8664
8665 bnx2x_mcp_wait_one(bp);
8666 }
8667
8668 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8669
8670 /* Check that shared memory is valid. This indicates that MCP is up. */
8671 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8672 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8673 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8674 rc = -ENOTTY;
8675 goto exit_lbl;
8676 }
8677
8678exit_lbl:
8679 /* Restore the `magic' bit value */
8680 if (!CHIP_IS_E1(bp))
8681 bnx2x_clp_reset_done(bp, magic_val);
8682
8683 return rc;
8684}
8685
8686static void bnx2x_pxp_prep(struct bnx2x *bp)
8687{
8688 if (!CHIP_IS_E1(bp)) {
8689 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8690 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8691 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8692 mmiowb();
8693 }
8694}
8695
8696/*
8697 * Reset the whole chip except for:
8698 * - PCIE core
8699 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8700 * one reset bit)
8701 * - IGU
8702 * - MISC (including AEU)
8703 * - GRC
8704 * - RBCN, RBCP
8705 */
8706static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8707{
8708 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8709
8710 not_reset_mask1 =
8711 MISC_REGISTERS_RESET_REG_1_RST_HC |
8712 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8713 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8714
8715 not_reset_mask2 =
8716 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8717 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8718 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8719 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8720 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8721 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8722 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8723 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8724
8725 reset_mask1 = 0xffffffff;
8726
8727 if (CHIP_IS_E1(bp))
8728 reset_mask2 = 0xffff;
8729 else
8730 reset_mask2 = 0x1ffff;
8731
8732 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8733 reset_mask1 & (~not_reset_mask1));
8734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8735 reset_mask2 & (~not_reset_mask2));
8736
8737 barrier();
8738 mmiowb();
8739
8740 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8741 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8742 mmiowb();
8743}
8744
8745static int bnx2x_process_kill(struct bnx2x *bp)
8746{
8747 int cnt = 1000;
8748 u32 val = 0;
8749 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8750
8751
8752 /* Empty the Tetris buffer, wait for 1s */
8753 do {
8754 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8755 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8756 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8757 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8758 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8759 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8760 ((port_is_idle_0 & 0x1) == 0x1) &&
8761 ((port_is_idle_1 & 0x1) == 0x1) &&
8762 (pgl_exp_rom2 == 0xffffffff))
8763 break;
8764 msleep(1);
8765 } while (cnt-- > 0);
8766
8767 if (cnt <= 0) {
8768 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8769 " are still"
8770 " outstanding read requests after 1s!\n");
8771 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8772 " port_is_idle_0=0x%08x,"
8773 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8774 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8775 pgl_exp_rom2);
8776 return -EAGAIN;
8777 }
8778
8779 barrier();
8780
8781 /* Close gates #2, #3 and #4 */
8782 bnx2x_set_234_gates(bp, true);
8783
8784 /* TBD: Indicate that "process kill" is in progress to MCP */
8785
8786 /* Clear "unprepared" bit */
8787 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8788 barrier();
8789
8790 /* Make sure all is written to the chip before the reset */
8791 mmiowb();
8792
8793 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8794 * PSWHST, GRC and PSWRD Tetris buffer.
8795 */
8796 msleep(1);
8797
8798 /* Prepare to chip reset: */
8799 /* MCP */
8800 bnx2x_reset_mcp_prep(bp, &val);
8801
8802 /* PXP */
8803 bnx2x_pxp_prep(bp);
8804 barrier();
8805
8806 /* reset the chip */
8807 bnx2x_process_kill_chip_reset(bp);
8808 barrier();
8809
8810 /* Recover after reset: */
8811 /* MCP */
8812 if (bnx2x_reset_mcp_comp(bp, val))
8813 return -EAGAIN;
8814
8815 /* PXP */
8816 bnx2x_pxp_prep(bp);
8817
8818 /* Open the gates #2, #3 and #4 */
8819 bnx2x_set_234_gates(bp, false);
8820
8821 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8822 * reset state, re-enable attentions. */
8823
a2fbb9ea
ET
8824 return 0;
8825}
8826
72fd0718
VZ
8827static int bnx2x_leader_reset(struct bnx2x *bp)
8828{
8829 int rc = 0;
8830 /* Try to recover after the failure */
8831 if (bnx2x_process_kill(bp)) {
8832 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8833 bp->dev->name);
8834 rc = -EAGAIN;
8835 goto exit_leader_reset;
8836 }
8837
8838 /* Clear "reset is in progress" bit and update the driver state */
8839 bnx2x_set_reset_done(bp);
8840 bp->recovery_state = BNX2X_RECOVERY_DONE;
8841
8842exit_leader_reset:
8843 bp->is_leader = 0;
8844 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8845 smp_wmb();
8846 return rc;
8847}
8848
8849static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8850
8851/* Assumption: runs under rtnl lock. This together with the fact
8852 * that it's called only from bnx2x_reset_task() ensure that it
8853 * will never be called when netif_running(bp->dev) is false.
8854 */
8855static void bnx2x_parity_recover(struct bnx2x *bp)
8856{
8857 DP(NETIF_MSG_HW, "Handling parity\n");
8858 while (1) {
8859 switch (bp->recovery_state) {
8860 case BNX2X_RECOVERY_INIT:
8861 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8862 /* Try to get a LEADER_LOCK HW lock */
8863 if (bnx2x_trylock_hw_lock(bp,
8864 HW_LOCK_RESOURCE_RESERVED_08))
8865 bp->is_leader = 1;
8866
8867 /* Stop the driver */
8868 /* If interface has been removed - break */
8869 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8870 return;
8871
8872 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8873 /* Ensure "is_leader" and "recovery_state"
8874 * update values are seen on other CPUs
8875 */
8876 smp_wmb();
8877 break;
8878
8879 case BNX2X_RECOVERY_WAIT:
8880 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8881 if (bp->is_leader) {
8882 u32 load_counter = bnx2x_get_load_cnt(bp);
8883 if (load_counter) {
8884 /* Wait until all other functions get
8885 * down.
8886 */
8887 schedule_delayed_work(&bp->reset_task,
8888 HZ/10);
8889 return;
8890 } else {
8891 /* If all other functions got down -
8892 * try to bring the chip back to
8893 * normal. In any case it's an exit
8894 * point for a leader.
8895 */
8896 if (bnx2x_leader_reset(bp) ||
8897 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8898 printk(KERN_ERR"%s: Recovery "
8899 "has failed. Power cycle is "
8900 "needed.\n", bp->dev->name);
8901 /* Disconnect this device */
8902 netif_device_detach(bp->dev);
8903 /* Block ifup for all function
8904 * of this ASIC until
8905 * "process kill" or power
8906 * cycle.
8907 */
8908 bnx2x_set_reset_in_progress(bp);
8909 /* Shut down the power */
8910 bnx2x_set_power_state(bp,
8911 PCI_D3hot);
8912 return;
8913 }
8914
8915 return;
8916 }
8917 } else { /* non-leader */
8918 if (!bnx2x_reset_is_done(bp)) {
8919 /* Try to get a LEADER_LOCK HW lock as
8920 * long as a former leader may have
8921 * been unloaded by the user or
8922 * released a leadership by another
8923 * reason.
8924 */
8925 if (bnx2x_trylock_hw_lock(bp,
8926 HW_LOCK_RESOURCE_RESERVED_08)) {
8927 /* I'm a leader now! Restart a
8928 * switch case.
8929 */
8930 bp->is_leader = 1;
8931 break;
8932 }
8933
8934 schedule_delayed_work(&bp->reset_task,
8935 HZ/10);
8936 return;
8937
8938 } else { /* A leader has completed
8939 * the "process kill". It's an exit
8940 * point for a non-leader.
8941 */
8942 bnx2x_nic_load(bp, LOAD_NORMAL);
8943 bp->recovery_state =
8944 BNX2X_RECOVERY_DONE;
8945 smp_wmb();
8946 return;
8947 }
8948 }
8949 default:
8950 return;
8951 }
8952 }
8953}
8954
8955/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8956 * scheduled on a general queue in order to prevent a dead lock.
8957 */
34f80b04
EG
8958static void bnx2x_reset_task(struct work_struct *work)
8959{
72fd0718 8960 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
8961
8962#ifdef BNX2X_STOP_ON_ERROR
8963 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8964 " so reset not done to allow debug dump,\n"
72fd0718 8965 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
8966 return;
8967#endif
8968
8969 rtnl_lock();
8970
8971 if (!netif_running(bp->dev))
8972 goto reset_task_exit;
8973
72fd0718
VZ
8974 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8975 bnx2x_parity_recover(bp);
8976 else {
8977 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8978 bnx2x_nic_load(bp, LOAD_NORMAL);
8979 }
34f80b04
EG
8980
8981reset_task_exit:
8982 rtnl_unlock();
8983}
8984
a2fbb9ea
ET
8985/* end of nic load/unload */
8986
8987/* ethtool_ops */
8988
8989/*
8990 * Init service functions
8991 */
8992
f1ef27ef
EG
8993static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8994{
8995 switch (func) {
8996 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8997 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8998 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8999 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9000 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9001 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9002 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9003 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9004 default:
9005 BNX2X_ERR("Unsupported function index: %d\n", func);
9006 return (u32)(-1);
9007 }
9008}
9009
9010static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9011{
9012 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9013
9014 /* Flush all outstanding writes */
9015 mmiowb();
9016
9017 /* Pretend to be function 0 */
9018 REG_WR(bp, reg, 0);
9019 /* Flush the GRC transaction (in the chip) */
9020 new_val = REG_RD(bp, reg);
9021 if (new_val != 0) {
9022 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9023 new_val);
9024 BUG();
9025 }
9026
9027 /* From now we are in the "like-E1" mode */
9028 bnx2x_int_disable(bp);
9029
9030 /* Flush all outstanding writes */
9031 mmiowb();
9032
9033 /* Restore the original funtion settings */
9034 REG_WR(bp, reg, orig_func);
9035 new_val = REG_RD(bp, reg);
9036 if (new_val != orig_func) {
9037 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9038 orig_func, new_val);
9039 BUG();
9040 }
9041}
9042
9043static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9044{
9045 if (CHIP_IS_E1H(bp))
9046 bnx2x_undi_int_disable_e1h(bp, func);
9047 else
9048 bnx2x_int_disable(bp);
9049}
9050
34f80b04
EG
9051static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9052{
9053 u32 val;
9054
9055 /* Check if there is any driver already loaded */
9056 val = REG_RD(bp, MISC_REG_UNPREPARED);
9057 if (val == 0x1) {
9058 /* Check if it is the UNDI driver
9059 * UNDI driver initializes CID offset for normal bell to 0x7
9060 */
4a37fb66 9061 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9062 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9063 if (val == 0x7) {
9064 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 9065 /* save our func */
34f80b04 9066 int func = BP_FUNC(bp);
da5a662a
VZ
9067 u32 swap_en;
9068 u32 swap_val;
34f80b04 9069
b4661739
EG
9070 /* clear the UNDI indication */
9071 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9072
34f80b04
EG
9073 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9074
9075 /* try unload UNDI on port 0 */
9076 bp->func = 0;
da5a662a
VZ
9077 bp->fw_seq =
9078 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9079 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 9080 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9081
9082 /* if UNDI is loaded on the other port */
9083 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9084
da5a662a
VZ
9085 /* send "DONE" for previous unload */
9086 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9087
9088 /* unload UNDI on port 1 */
34f80b04 9089 bp->func = 1;
da5a662a
VZ
9090 bp->fw_seq =
9091 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9092 DRV_MSG_SEQ_NUMBER_MASK);
9093 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9094
9095 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9096 }
9097
b4661739
EG
9098 /* now it's safe to release the lock */
9099 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9100
f1ef27ef 9101 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
9102
9103 /* close input traffic and wait for it */
9104 /* Do not rcv packets to BRB */
9105 REG_WR(bp,
9106 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9107 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9108 /* Do not direct rcv packets that are not for MCP to
9109 * the BRB */
9110 REG_WR(bp,
9111 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9112 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9113 /* clear AEU */
9114 REG_WR(bp,
9115 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9116 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9117 msleep(10);
9118
9119 /* save NIG port swap info */
9120 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9121 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
9122 /* reset device */
9123 REG_WR(bp,
9124 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 9125 0xd3ffffff);
34f80b04
EG
9126 REG_WR(bp,
9127 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9128 0x1403);
da5a662a
VZ
9129 /* take the NIG out of reset and restore swap values */
9130 REG_WR(bp,
9131 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9132 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9133 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9134 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9135
9136 /* send unload done to the MCP */
9137 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9138
9139 /* restore our func and fw_seq */
9140 bp->func = func;
9141 bp->fw_seq =
9142 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9143 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
9144
9145 } else
9146 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9147 }
9148}
9149
9150static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9151{
9152 u32 val, val2, val3, val4, id;
72ce58c3 9153 u16 pmc;
34f80b04
EG
9154
9155 /* Get the chip revision id and number. */
9156 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9157 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9158 id = ((val & 0xffff) << 16);
9159 val = REG_RD(bp, MISC_REG_CHIP_REV);
9160 id |= ((val & 0xf) << 12);
9161 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9162 id |= ((val & 0xff) << 4);
5a40e08e 9163 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
9164 id |= (val & 0xf);
9165 bp->common.chip_id = id;
9166 bp->link_params.chip_id = bp->common.chip_id;
9167 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9168
1c06328c
EG
9169 val = (REG_RD(bp, 0x2874) & 0x55);
9170 if ((bp->common.chip_id & 0x1) ||
9171 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9172 bp->flags |= ONE_PORT_FLAG;
9173 BNX2X_DEV_INFO("single port device\n");
9174 }
9175
34f80b04
EG
9176 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9177 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9178 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9179 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9180 bp->common.flash_size, bp->common.flash_size);
9181
9182 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 9183 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 9184 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
9185 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9186 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
9187
9188 if (!bp->common.shmem_base ||
9189 (bp->common.shmem_base < 0xA0000) ||
9190 (bp->common.shmem_base >= 0xC0000)) {
9191 BNX2X_DEV_INFO("MCP not active\n");
9192 bp->flags |= NO_MCP_FLAG;
9193 return;
9194 }
9195
9196 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9197 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9198 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 9199 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
9200
9201 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 9202 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
9203
9204 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9205 SHARED_HW_CFG_LED_MODE_MASK) >>
9206 SHARED_HW_CFG_LED_MODE_SHIFT);
9207
c2c8b03e
EG
9208 bp->link_params.feature_config_flags = 0;
9209 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9210 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9211 bp->link_params.feature_config_flags |=
9212 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9213 else
9214 bp->link_params.feature_config_flags &=
9215 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9216
34f80b04
EG
9217 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9218 bp->common.bc_ver = val;
9219 BNX2X_DEV_INFO("bc_ver %X\n", val);
9220 if (val < BNX2X_BC_VER) {
9221 /* for now only warn
9222 * later we might need to enforce this */
cdaa7cb8
VZ
9223 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9224 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 9225 }
4d295db0
EG
9226 bp->link_params.feature_config_flags |=
9227 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9228 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
9229
9230 if (BP_E1HVN(bp) == 0) {
9231 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9232 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9233 } else {
9234 /* no WOL capability for E1HVN != 0 */
9235 bp->flags |= NO_WOL_FLAG;
9236 }
9237 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 9238 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
9239
9240 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9241 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9242 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9243 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9244
cdaa7cb8
VZ
9245 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9246 val, val2, val3, val4);
34f80b04
EG
9247}
9248
9249static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9250 u32 switch_cfg)
a2fbb9ea 9251{
34f80b04 9252 int port = BP_PORT(bp);
a2fbb9ea
ET
9253 u32 ext_phy_type;
9254
a2fbb9ea
ET
9255 switch (switch_cfg) {
9256 case SWITCH_CFG_1G:
9257 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9258
c18487ee
YR
9259 ext_phy_type =
9260 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9261 switch (ext_phy_type) {
9262 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9263 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9264 ext_phy_type);
9265
34f80b04
EG
9266 bp->port.supported |= (SUPPORTED_10baseT_Half |
9267 SUPPORTED_10baseT_Full |
9268 SUPPORTED_100baseT_Half |
9269 SUPPORTED_100baseT_Full |
9270 SUPPORTED_1000baseT_Full |
9271 SUPPORTED_2500baseX_Full |
9272 SUPPORTED_TP |
9273 SUPPORTED_FIBRE |
9274 SUPPORTED_Autoneg |
9275 SUPPORTED_Pause |
9276 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9277 break;
9278
9279 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9280 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9281 ext_phy_type);
9282
34f80b04
EG
9283 bp->port.supported |= (SUPPORTED_10baseT_Half |
9284 SUPPORTED_10baseT_Full |
9285 SUPPORTED_100baseT_Half |
9286 SUPPORTED_100baseT_Full |
9287 SUPPORTED_1000baseT_Full |
9288 SUPPORTED_TP |
9289 SUPPORTED_FIBRE |
9290 SUPPORTED_Autoneg |
9291 SUPPORTED_Pause |
9292 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9293 break;
9294
9295 default:
9296 BNX2X_ERR("NVRAM config error. "
9297 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 9298 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9299 return;
9300 }
9301
34f80b04
EG
9302 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9303 port*0x10);
9304 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
9305 break;
9306
9307 case SWITCH_CFG_10G:
9308 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9309
c18487ee
YR
9310 ext_phy_type =
9311 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9312 switch (ext_phy_type) {
9313 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9314 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9315 ext_phy_type);
9316
34f80b04
EG
9317 bp->port.supported |= (SUPPORTED_10baseT_Half |
9318 SUPPORTED_10baseT_Full |
9319 SUPPORTED_100baseT_Half |
9320 SUPPORTED_100baseT_Full |
9321 SUPPORTED_1000baseT_Full |
9322 SUPPORTED_2500baseX_Full |
9323 SUPPORTED_10000baseT_Full |
9324 SUPPORTED_TP |
9325 SUPPORTED_FIBRE |
9326 SUPPORTED_Autoneg |
9327 SUPPORTED_Pause |
9328 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9329 break;
9330
589abe3a
EG
9331 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9332 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 9333 ext_phy_type);
f1410647 9334
34f80b04 9335 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9336 SUPPORTED_1000baseT_Full |
34f80b04 9337 SUPPORTED_FIBRE |
589abe3a 9338 SUPPORTED_Autoneg |
34f80b04
EG
9339 SUPPORTED_Pause |
9340 SUPPORTED_Asym_Pause);
f1410647
ET
9341 break;
9342
589abe3a
EG
9343 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9344 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
9345 ext_phy_type);
9346
34f80b04 9347 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9348 SUPPORTED_2500baseX_Full |
34f80b04 9349 SUPPORTED_1000baseT_Full |
589abe3a
EG
9350 SUPPORTED_FIBRE |
9351 SUPPORTED_Autoneg |
9352 SUPPORTED_Pause |
9353 SUPPORTED_Asym_Pause);
9354 break;
9355
9356 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9357 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9358 ext_phy_type);
9359
9360 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
9361 SUPPORTED_FIBRE |
9362 SUPPORTED_Pause |
9363 SUPPORTED_Asym_Pause);
f1410647
ET
9364 break;
9365
589abe3a
EG
9366 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9367 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
9368 ext_phy_type);
9369
34f80b04
EG
9370 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9371 SUPPORTED_1000baseT_Full |
9372 SUPPORTED_FIBRE |
34f80b04
EG
9373 SUPPORTED_Pause |
9374 SUPPORTED_Asym_Pause);
f1410647
ET
9375 break;
9376
589abe3a
EG
9377 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9378 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
9379 ext_phy_type);
9380
34f80b04 9381 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 9382 SUPPORTED_1000baseT_Full |
34f80b04 9383 SUPPORTED_Autoneg |
589abe3a 9384 SUPPORTED_FIBRE |
34f80b04
EG
9385 SUPPORTED_Pause |
9386 SUPPORTED_Asym_Pause);
c18487ee
YR
9387 break;
9388
4d295db0
EG
9389 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9390 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9391 ext_phy_type);
9392
9393 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9394 SUPPORTED_1000baseT_Full |
9395 SUPPORTED_Autoneg |
9396 SUPPORTED_FIBRE |
9397 SUPPORTED_Pause |
9398 SUPPORTED_Asym_Pause);
9399 break;
9400
f1410647
ET
9401 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9402 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9403 ext_phy_type);
9404
34f80b04
EG
9405 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9406 SUPPORTED_TP |
9407 SUPPORTED_Autoneg |
9408 SUPPORTED_Pause |
9409 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9410 break;
9411
28577185
EG
9412 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9413 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9414 ext_phy_type);
9415
9416 bp->port.supported |= (SUPPORTED_10baseT_Half |
9417 SUPPORTED_10baseT_Full |
9418 SUPPORTED_100baseT_Half |
9419 SUPPORTED_100baseT_Full |
9420 SUPPORTED_1000baseT_Full |
9421 SUPPORTED_10000baseT_Full |
9422 SUPPORTED_TP |
9423 SUPPORTED_Autoneg |
9424 SUPPORTED_Pause |
9425 SUPPORTED_Asym_Pause);
9426 break;
9427
c18487ee
YR
9428 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9429 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9430 bp->link_params.ext_phy_config);
9431 break;
9432
a2fbb9ea
ET
9433 default:
9434 BNX2X_ERR("NVRAM config error. "
9435 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 9436 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9437 return;
9438 }
9439
34f80b04
EG
9440 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9441 port*0x18);
9442 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 9443
a2fbb9ea
ET
9444 break;
9445
9446 default:
9447 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 9448 bp->port.link_config);
a2fbb9ea
ET
9449 return;
9450 }
34f80b04 9451 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
9452
9453 /* mask what we support according to speed_cap_mask */
c18487ee
YR
9454 if (!(bp->link_params.speed_cap_mask &
9455 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 9456 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 9457
c18487ee
YR
9458 if (!(bp->link_params.speed_cap_mask &
9459 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 9460 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 9461
c18487ee
YR
9462 if (!(bp->link_params.speed_cap_mask &
9463 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 9464 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 9465
c18487ee
YR
9466 if (!(bp->link_params.speed_cap_mask &
9467 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 9468 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 9469
c18487ee
YR
9470 if (!(bp->link_params.speed_cap_mask &
9471 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
9472 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9473 SUPPORTED_1000baseT_Full);
a2fbb9ea 9474
c18487ee
YR
9475 if (!(bp->link_params.speed_cap_mask &
9476 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 9477 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 9478
c18487ee
YR
9479 if (!(bp->link_params.speed_cap_mask &
9480 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 9481 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 9482
34f80b04 9483 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
9484}
9485
34f80b04 9486static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 9487{
c18487ee 9488 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 9489
34f80b04 9490 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 9491 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 9492 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 9493 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9494 bp->port.advertising = bp->port.supported;
a2fbb9ea 9495 } else {
c18487ee
YR
9496 u32 ext_phy_type =
9497 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9498
9499 if ((ext_phy_type ==
9500 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9501 (ext_phy_type ==
9502 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 9503 /* force 10G, no AN */
c18487ee 9504 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 9505 bp->port.advertising =
a2fbb9ea
ET
9506 (ADVERTISED_10000baseT_Full |
9507 ADVERTISED_FIBRE);
9508 break;
9509 }
9510 BNX2X_ERR("NVRAM config error. "
9511 "Invalid link_config 0x%x"
9512 " Autoneg not supported\n",
34f80b04 9513 bp->port.link_config);
a2fbb9ea
ET
9514 return;
9515 }
9516 break;
9517
9518 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 9519 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 9520 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
9521 bp->port.advertising = (ADVERTISED_10baseT_Full |
9522 ADVERTISED_TP);
a2fbb9ea 9523 } else {
cdaa7cb8
VZ
9524 BNX2X_ERROR("NVRAM config error. "
9525 "Invalid link_config 0x%x"
9526 " speed_cap_mask 0x%x\n",
9527 bp->port.link_config,
9528 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9529 return;
9530 }
9531 break;
9532
9533 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 9534 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
9535 bp->link_params.req_line_speed = SPEED_10;
9536 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9537 bp->port.advertising = (ADVERTISED_10baseT_Half |
9538 ADVERTISED_TP);
a2fbb9ea 9539 } else {
cdaa7cb8
VZ
9540 BNX2X_ERROR("NVRAM config error. "
9541 "Invalid link_config 0x%x"
9542 " speed_cap_mask 0x%x\n",
9543 bp->port.link_config,
9544 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9545 return;
9546 }
9547 break;
9548
9549 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 9550 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 9551 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
9552 bp->port.advertising = (ADVERTISED_100baseT_Full |
9553 ADVERTISED_TP);
a2fbb9ea 9554 } else {
cdaa7cb8
VZ
9555 BNX2X_ERROR("NVRAM config error. "
9556 "Invalid link_config 0x%x"
9557 " speed_cap_mask 0x%x\n",
9558 bp->port.link_config,
9559 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9560 return;
9561 }
9562 break;
9563
9564 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 9565 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
9566 bp->link_params.req_line_speed = SPEED_100;
9567 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9568 bp->port.advertising = (ADVERTISED_100baseT_Half |
9569 ADVERTISED_TP);
a2fbb9ea 9570 } else {
cdaa7cb8
VZ
9571 BNX2X_ERROR("NVRAM config error. "
9572 "Invalid link_config 0x%x"
9573 " speed_cap_mask 0x%x\n",
9574 bp->port.link_config,
9575 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9576 return;
9577 }
9578 break;
9579
9580 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 9581 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 9582 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
9583 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9584 ADVERTISED_TP);
a2fbb9ea 9585 } else {
cdaa7cb8
VZ
9586 BNX2X_ERROR("NVRAM config error. "
9587 "Invalid link_config 0x%x"
9588 " speed_cap_mask 0x%x\n",
9589 bp->port.link_config,
9590 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9591 return;
9592 }
9593 break;
9594
9595 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 9596 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 9597 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
9598 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9599 ADVERTISED_TP);
a2fbb9ea 9600 } else {
cdaa7cb8
VZ
9601 BNX2X_ERROR("NVRAM config error. "
9602 "Invalid link_config 0x%x"
9603 " speed_cap_mask 0x%x\n",
9604 bp->port.link_config,
9605 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9606 return;
9607 }
9608 break;
9609
9610 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9611 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9612 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 9613 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 9614 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
9615 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9616 ADVERTISED_FIBRE);
a2fbb9ea 9617 } else {
cdaa7cb8
VZ
9618 BNX2X_ERROR("NVRAM config error. "
9619 "Invalid link_config 0x%x"
9620 " speed_cap_mask 0x%x\n",
9621 bp->port.link_config,
9622 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9623 return;
9624 }
9625 break;
9626
9627 default:
cdaa7cb8
VZ
9628 BNX2X_ERROR("NVRAM config error. "
9629 "BAD link speed link_config 0x%x\n",
9630 bp->port.link_config);
c18487ee 9631 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9632 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
9633 break;
9634 }
a2fbb9ea 9635
34f80b04
EG
9636 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9637 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 9638 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 9639 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 9640 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9641
c18487ee 9642 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 9643 " advertising 0x%x\n",
c18487ee
YR
9644 bp->link_params.req_line_speed,
9645 bp->link_params.req_duplex,
34f80b04 9646 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
9647}
9648
e665bfda
MC
9649static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9650{
9651 mac_hi = cpu_to_be16(mac_hi);
9652 mac_lo = cpu_to_be32(mac_lo);
9653 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9654 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9655}
9656
34f80b04 9657static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 9658{
34f80b04
EG
9659 int port = BP_PORT(bp);
9660 u32 val, val2;
589abe3a 9661 u32 config;
c2c8b03e 9662 u16 i;
01cd4528 9663 u32 ext_phy_type;
a2fbb9ea 9664
c18487ee 9665 bp->link_params.bp = bp;
34f80b04 9666 bp->link_params.port = port;
c18487ee 9667
c18487ee 9668 bp->link_params.lane_config =
a2fbb9ea 9669 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 9670 bp->link_params.ext_phy_config =
a2fbb9ea
ET
9671 SHMEM_RD(bp,
9672 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
9673 /* BCM8727_NOC => BCM8727 no over current */
9674 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9675 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9676 bp->link_params.ext_phy_config &=
9677 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9678 bp->link_params.ext_phy_config |=
9679 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9680 bp->link_params.feature_config_flags |=
9681 FEATURE_CONFIG_BCM8727_NOC;
9682 }
9683
c18487ee 9684 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
9685 SHMEM_RD(bp,
9686 dev_info.port_hw_config[port].speed_capability_mask);
9687
34f80b04 9688 bp->port.link_config =
a2fbb9ea
ET
9689 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9690
c2c8b03e
EG
9691 /* Get the 4 lanes xgxs config rx and tx */
9692 for (i = 0; i < 2; i++) {
9693 val = SHMEM_RD(bp,
9694 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9695 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9696 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9697
9698 val = SHMEM_RD(bp,
9699 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9700 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9701 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9702 }
9703
3ce2c3f9
EG
9704 /* If the device is capable of WoL, set the default state according
9705 * to the HW
9706 */
4d295db0 9707 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
9708 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9709 (config & PORT_FEATURE_WOL_ENABLED));
9710
c2c8b03e
EG
9711 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9712 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
9713 bp->link_params.lane_config,
9714 bp->link_params.ext_phy_config,
34f80b04 9715 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 9716
4d295db0
EG
9717 bp->link_params.switch_cfg |= (bp->port.link_config &
9718 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 9719 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
9720
9721 bnx2x_link_settings_requested(bp);
9722
01cd4528
EG
9723 /*
9724 * If connected directly, work with the internal PHY, otherwise, work
9725 * with the external PHY
9726 */
9727 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9728 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9729 bp->mdio.prtad = bp->link_params.phy_addr;
9730
9731 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9732 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9733 bp->mdio.prtad =
659bc5c4 9734 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 9735
a2fbb9ea
ET
9736 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9737 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 9738 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
9739 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9740 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
9741
9742#ifdef BCM_CNIC
9743 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9744 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9745 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9746#endif
34f80b04
EG
9747}
9748
9749static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9750{
9751 int func = BP_FUNC(bp);
9752 u32 val, val2;
9753 int rc = 0;
a2fbb9ea 9754
34f80b04 9755 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 9756
34f80b04
EG
9757 bp->e1hov = 0;
9758 bp->e1hmf = 0;
2145a920 9759 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
9760 bp->mf_config =
9761 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 9762
2691d51d 9763 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 9764 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 9765 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 9766 bp->e1hmf = 1;
2691d51d
EG
9767 BNX2X_DEV_INFO("%s function mode\n",
9768 IS_E1HMF(bp) ? "multi" : "single");
9769
9770 if (IS_E1HMF(bp)) {
9771 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9772 e1hov_tag) &
9773 FUNC_MF_CFG_E1HOV_TAG_MASK);
9774 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9775 bp->e1hov = val;
9776 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9777 "(0x%04x)\n",
9778 func, bp->e1hov, bp->e1hov);
9779 } else {
cdaa7cb8
VZ
9780 BNX2X_ERROR("No valid E1HOV for func %d,"
9781 " aborting\n", func);
34f80b04
EG
9782 rc = -EPERM;
9783 }
2691d51d
EG
9784 } else {
9785 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
9786 BNX2X_ERROR("VN %d in single function mode,"
9787 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
9788 rc = -EPERM;
9789 }
34f80b04
EG
9790 }
9791 }
a2fbb9ea 9792
34f80b04
EG
9793 if (!BP_NOMCP(bp)) {
9794 bnx2x_get_port_hwinfo(bp);
9795
9796 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9797 DRV_MSG_SEQ_NUMBER_MASK);
9798 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9799 }
9800
9801 if (IS_E1HMF(bp)) {
9802 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9803 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9804 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9805 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9806 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9807 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9808 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9809 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9810 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9811 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9812 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9813 ETH_ALEN);
9814 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9815 ETH_ALEN);
a2fbb9ea 9816 }
34f80b04
EG
9817
9818 return rc;
a2fbb9ea
ET
9819 }
9820
34f80b04
EG
9821 if (BP_NOMCP(bp)) {
9822 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 9823 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
9824 random_ether_addr(bp->dev->dev_addr);
9825 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9826 }
a2fbb9ea 9827
34f80b04
EG
9828 return rc;
9829}
9830
34f24c7f
VZ
9831static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9832{
9833 int cnt, i, block_end, rodi;
9834 char vpd_data[BNX2X_VPD_LEN+1];
9835 char str_id_reg[VENDOR_ID_LEN+1];
9836 char str_id_cap[VENDOR_ID_LEN+1];
9837 u8 len;
9838
9839 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9840 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9841
9842 if (cnt < BNX2X_VPD_LEN)
9843 goto out_not_found;
9844
9845 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9846 PCI_VPD_LRDT_RO_DATA);
9847 if (i < 0)
9848 goto out_not_found;
9849
9850
9851 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9852 pci_vpd_lrdt_size(&vpd_data[i]);
9853
9854 i += PCI_VPD_LRDT_TAG_SIZE;
9855
9856 if (block_end > BNX2X_VPD_LEN)
9857 goto out_not_found;
9858
9859 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9860 PCI_VPD_RO_KEYWORD_MFR_ID);
9861 if (rodi < 0)
9862 goto out_not_found;
9863
9864 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9865
9866 if (len != VENDOR_ID_LEN)
9867 goto out_not_found;
9868
9869 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9870
9871 /* vendor specific info */
9872 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9873 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9874 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9875 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9876
9877 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9878 PCI_VPD_RO_KEYWORD_VENDOR0);
9879 if (rodi >= 0) {
9880 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9881
9882 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9883
9884 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9885 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9886 bp->fw_ver[len] = ' ';
9887 }
9888 }
9889 return;
9890 }
9891out_not_found:
9892 return;
9893}
9894
34f80b04
EG
9895static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9896{
9897 int func = BP_FUNC(bp);
87942b46 9898 int timer_interval;
34f80b04
EG
9899 int rc;
9900
da5a662a
VZ
9901 /* Disable interrupt handling until HW is initialized */
9902 atomic_set(&bp->intr_sem, 1);
e1510706 9903 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 9904
34f80b04 9905 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 9906 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
9907#ifdef BCM_CNIC
9908 mutex_init(&bp->cnic_mutex);
9909#endif
a2fbb9ea 9910
1cf167f2 9911 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 9912 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
9913
9914 rc = bnx2x_get_hwinfo(bp);
9915
34f24c7f 9916 bnx2x_read_fwinfo(bp);
34f80b04
EG
9917 /* need to reset chip if undi was active */
9918 if (!BP_NOMCP(bp))
9919 bnx2x_undi_unload(bp);
9920
9921 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 9922 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
9923
9924 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
9925 dev_err(&bp->pdev->dev, "MCP disabled, "
9926 "must load devices in order!\n");
34f80b04 9927
555f6c78 9928 /* Set multi queue mode */
8badd27a
EG
9929 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9930 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
9931 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9932 "requested is not MSI-X\n");
555f6c78
EG
9933 multi_mode = ETH_RSS_MODE_DISABLED;
9934 }
9935 bp->multi_mode = multi_mode;
9936
9937
4fd89b7a
DK
9938 bp->dev->features |= NETIF_F_GRO;
9939
7a9b2557
VZ
9940 /* Set TPA flags */
9941 if (disable_tpa) {
9942 bp->flags &= ~TPA_ENABLE_FLAG;
9943 bp->dev->features &= ~NETIF_F_LRO;
9944 } else {
9945 bp->flags |= TPA_ENABLE_FLAG;
9946 bp->dev->features |= NETIF_F_LRO;
9947 }
9948
a18f5128
EG
9949 if (CHIP_IS_E1(bp))
9950 bp->dropless_fc = 0;
9951 else
9952 bp->dropless_fc = dropless_fc;
9953
8d5726c4 9954 bp->mrrs = mrrs;
7a9b2557 9955
34f80b04
EG
9956 bp->tx_ring_size = MAX_TX_AVAIL;
9957 bp->rx_ring_size = MAX_RX_AVAIL;
9958
9959 bp->rx_csum = 1;
34f80b04 9960
7d323bfd
EG
9961 /* make sure that the numbers are in the right granularity */
9962 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9963 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 9964
87942b46
EG
9965 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9966 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9967
9968 init_timer(&bp->timer);
9969 bp->timer.expires = jiffies + bp->current_interval;
9970 bp->timer.data = (unsigned long) bp;
9971 bp->timer.function = bnx2x_timer;
9972
9973 return rc;
a2fbb9ea
ET
9974}
9975
9976/*
9977 * ethtool service functions
9978 */
9979
9980/* All ethtool functions called with rtnl_lock */
9981
9982static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9983{
9984 struct bnx2x *bp = netdev_priv(dev);
9985
34f80b04
EG
9986 cmd->supported = bp->port.supported;
9987 cmd->advertising = bp->port.advertising;
a2fbb9ea 9988
f34d28ea
EG
9989 if ((bp->state == BNX2X_STATE_OPEN) &&
9990 !(bp->flags & MF_FUNC_DIS) &&
9991 (bp->link_vars.link_up)) {
c18487ee
YR
9992 cmd->speed = bp->link_vars.line_speed;
9993 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9994 if (IS_E1HMF(bp)) {
9995 u16 vn_max_rate;
34f80b04 9996
b015e3d1
EG
9997 vn_max_rate =
9998 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 9999 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
10000 if (vn_max_rate < cmd->speed)
10001 cmd->speed = vn_max_rate;
10002 }
10003 } else {
10004 cmd->speed = -1;
10005 cmd->duplex = -1;
34f80b04 10006 }
a2fbb9ea 10007
c18487ee
YR
10008 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10009 u32 ext_phy_type =
10010 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
10011
10012 switch (ext_phy_type) {
10013 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 10014 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 10015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
10016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 10019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
10020 cmd->port = PORT_FIBRE;
10021 break;
10022
10023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 10024 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
10025 cmd->port = PORT_TP;
10026 break;
10027
c18487ee
YR
10028 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10029 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10030 bp->link_params.ext_phy_config);
10031 break;
10032
f1410647
ET
10033 default:
10034 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
10035 bp->link_params.ext_phy_config);
10036 break;
f1410647
ET
10037 }
10038 } else
a2fbb9ea 10039 cmd->port = PORT_TP;
a2fbb9ea 10040
01cd4528 10041 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
10042 cmd->transceiver = XCVR_INTERNAL;
10043
c18487ee 10044 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 10045 cmd->autoneg = AUTONEG_ENABLE;
f1410647 10046 else
a2fbb9ea 10047 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
10048
10049 cmd->maxtxpkt = 0;
10050 cmd->maxrxpkt = 0;
10051
10052 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10053 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10054 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10055 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10056 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10057 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10058 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10059
10060 return 0;
10061}
10062
10063static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10064{
10065 struct bnx2x *bp = netdev_priv(dev);
10066 u32 advertising;
10067
34f80b04
EG
10068 if (IS_E1HMF(bp))
10069 return 0;
10070
a2fbb9ea
ET
10071 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10072 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10073 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10074 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10075 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10076 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10077 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10078
a2fbb9ea 10079 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
10080 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10081 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 10082 return -EINVAL;
f1410647 10083 }
a2fbb9ea
ET
10084
10085 /* advertise the requested speed and duplex if supported */
34f80b04 10086 cmd->advertising &= bp->port.supported;
a2fbb9ea 10087
c18487ee
YR
10088 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10089 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
10090 bp->port.advertising |= (ADVERTISED_Autoneg |
10091 cmd->advertising);
a2fbb9ea
ET
10092
10093 } else { /* forced speed */
10094 /* advertise the requested speed and duplex if supported */
10095 switch (cmd->speed) {
10096 case SPEED_10:
10097 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 10098 if (!(bp->port.supported &
f1410647
ET
10099 SUPPORTED_10baseT_Full)) {
10100 DP(NETIF_MSG_LINK,
10101 "10M full not supported\n");
a2fbb9ea 10102 return -EINVAL;
f1410647 10103 }
a2fbb9ea
ET
10104
10105 advertising = (ADVERTISED_10baseT_Full |
10106 ADVERTISED_TP);
10107 } else {
34f80b04 10108 if (!(bp->port.supported &
f1410647
ET
10109 SUPPORTED_10baseT_Half)) {
10110 DP(NETIF_MSG_LINK,
10111 "10M half not supported\n");
a2fbb9ea 10112 return -EINVAL;
f1410647 10113 }
a2fbb9ea
ET
10114
10115 advertising = (ADVERTISED_10baseT_Half |
10116 ADVERTISED_TP);
10117 }
10118 break;
10119
10120 case SPEED_100:
10121 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 10122 if (!(bp->port.supported &
f1410647
ET
10123 SUPPORTED_100baseT_Full)) {
10124 DP(NETIF_MSG_LINK,
10125 "100M full not supported\n");
a2fbb9ea 10126 return -EINVAL;
f1410647 10127 }
a2fbb9ea
ET
10128
10129 advertising = (ADVERTISED_100baseT_Full |
10130 ADVERTISED_TP);
10131 } else {
34f80b04 10132 if (!(bp->port.supported &
f1410647
ET
10133 SUPPORTED_100baseT_Half)) {
10134 DP(NETIF_MSG_LINK,
10135 "100M half not supported\n");
a2fbb9ea 10136 return -EINVAL;
f1410647 10137 }
a2fbb9ea
ET
10138
10139 advertising = (ADVERTISED_100baseT_Half |
10140 ADVERTISED_TP);
10141 }
10142 break;
10143
10144 case SPEED_1000:
f1410647
ET
10145 if (cmd->duplex != DUPLEX_FULL) {
10146 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 10147 return -EINVAL;
f1410647 10148 }
a2fbb9ea 10149
34f80b04 10150 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 10151 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 10152 return -EINVAL;
f1410647 10153 }
a2fbb9ea
ET
10154
10155 advertising = (ADVERTISED_1000baseT_Full |
10156 ADVERTISED_TP);
10157 break;
10158
10159 case SPEED_2500:
f1410647
ET
10160 if (cmd->duplex != DUPLEX_FULL) {
10161 DP(NETIF_MSG_LINK,
10162 "2.5G half not supported\n");
a2fbb9ea 10163 return -EINVAL;
f1410647 10164 }
a2fbb9ea 10165
34f80b04 10166 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
10167 DP(NETIF_MSG_LINK,
10168 "2.5G full not supported\n");
a2fbb9ea 10169 return -EINVAL;
f1410647 10170 }
a2fbb9ea 10171
f1410647 10172 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
10173 ADVERTISED_TP);
10174 break;
10175
10176 case SPEED_10000:
f1410647
ET
10177 if (cmd->duplex != DUPLEX_FULL) {
10178 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 10179 return -EINVAL;
f1410647 10180 }
a2fbb9ea 10181
34f80b04 10182 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 10183 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 10184 return -EINVAL;
f1410647 10185 }
a2fbb9ea
ET
10186
10187 advertising = (ADVERTISED_10000baseT_Full |
10188 ADVERTISED_FIBRE);
10189 break;
10190
10191 default:
f1410647 10192 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
10193 return -EINVAL;
10194 }
10195
c18487ee
YR
10196 bp->link_params.req_line_speed = cmd->speed;
10197 bp->link_params.req_duplex = cmd->duplex;
34f80b04 10198 bp->port.advertising = advertising;
a2fbb9ea
ET
10199 }
10200
c18487ee 10201 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 10202 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 10203 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 10204 bp->port.advertising);
a2fbb9ea 10205
34f80b04 10206 if (netif_running(dev)) {
bb2a0f7a 10207 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10208 bnx2x_link_set(bp);
10209 }
a2fbb9ea
ET
10210
10211 return 0;
10212}
10213
0a64ea57
EG
10214#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10215#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10216
10217static int bnx2x_get_regs_len(struct net_device *dev)
10218{
0a64ea57 10219 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 10220 int regdump_len = 0;
0a64ea57
EG
10221 int i;
10222
0a64ea57
EG
10223 if (CHIP_IS_E1(bp)) {
10224 for (i = 0; i < REGS_COUNT; i++)
10225 if (IS_E1_ONLINE(reg_addrs[i].info))
10226 regdump_len += reg_addrs[i].size;
10227
10228 for (i = 0; i < WREGS_COUNT_E1; i++)
10229 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10230 regdump_len += wreg_addrs_e1[i].size *
10231 (1 + wreg_addrs_e1[i].read_regs_count);
10232
10233 } else { /* E1H */
10234 for (i = 0; i < REGS_COUNT; i++)
10235 if (IS_E1H_ONLINE(reg_addrs[i].info))
10236 regdump_len += reg_addrs[i].size;
10237
10238 for (i = 0; i < WREGS_COUNT_E1H; i++)
10239 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10240 regdump_len += wreg_addrs_e1h[i].size *
10241 (1 + wreg_addrs_e1h[i].read_regs_count);
10242 }
10243 regdump_len *= 4;
10244 regdump_len += sizeof(struct dump_hdr);
10245
10246 return regdump_len;
10247}
10248
10249static void bnx2x_get_regs(struct net_device *dev,
10250 struct ethtool_regs *regs, void *_p)
10251{
10252 u32 *p = _p, i, j;
10253 struct bnx2x *bp = netdev_priv(dev);
10254 struct dump_hdr dump_hdr = {0};
10255
10256 regs->version = 0;
10257 memset(p, 0, regs->len);
10258
10259 if (!netif_running(bp->dev))
10260 return;
10261
10262 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10263 dump_hdr.dump_sign = dump_sign_all;
10264 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10265 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10266 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10267 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10268 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10269
10270 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10271 p += dump_hdr.hdr_size + 1;
10272
10273 if (CHIP_IS_E1(bp)) {
10274 for (i = 0; i < REGS_COUNT; i++)
10275 if (IS_E1_ONLINE(reg_addrs[i].info))
10276 for (j = 0; j < reg_addrs[i].size; j++)
10277 *p++ = REG_RD(bp,
10278 reg_addrs[i].addr + j*4);
10279
10280 } else { /* E1H */
10281 for (i = 0; i < REGS_COUNT; i++)
10282 if (IS_E1H_ONLINE(reg_addrs[i].info))
10283 for (j = 0; j < reg_addrs[i].size; j++)
10284 *p++ = REG_RD(bp,
10285 reg_addrs[i].addr + j*4);
10286 }
10287}
10288
0d28e49a
EG
10289#define PHY_FW_VER_LEN 10
10290
10291static void bnx2x_get_drvinfo(struct net_device *dev,
10292 struct ethtool_drvinfo *info)
10293{
10294 struct bnx2x *bp = netdev_priv(dev);
10295 u8 phy_fw_ver[PHY_FW_VER_LEN];
10296
10297 strcpy(info->driver, DRV_MODULE_NAME);
10298 strcpy(info->version, DRV_MODULE_VERSION);
10299
10300 phy_fw_ver[0] = '\0';
10301 if (bp->port.pmf) {
10302 bnx2x_acquire_phy_lock(bp);
10303 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10304 (bp->state != BNX2X_STATE_CLOSED),
10305 phy_fw_ver, PHY_FW_VER_LEN);
10306 bnx2x_release_phy_lock(bp);
10307 }
10308
34f24c7f
VZ
10309 strncpy(info->fw_version, bp->fw_ver, 32);
10310 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10311 "bc %d.%d.%d%s%s",
0d28e49a
EG
10312 (bp->common.bc_ver & 0xff0000) >> 16,
10313 (bp->common.bc_ver & 0xff00) >> 8,
10314 (bp->common.bc_ver & 0xff),
34f24c7f 10315 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
0d28e49a
EG
10316 strcpy(info->bus_info, pci_name(bp->pdev));
10317 info->n_stats = BNX2X_NUM_STATS;
10318 info->testinfo_len = BNX2X_NUM_TESTS;
10319 info->eedump_len = bp->common.flash_size;
10320 info->regdump_len = bnx2x_get_regs_len(dev);
10321}
10322
a2fbb9ea
ET
10323static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10324{
10325 struct bnx2x *bp = netdev_priv(dev);
10326
10327 if (bp->flags & NO_WOL_FLAG) {
10328 wol->supported = 0;
10329 wol->wolopts = 0;
10330 } else {
10331 wol->supported = WAKE_MAGIC;
10332 if (bp->wol)
10333 wol->wolopts = WAKE_MAGIC;
10334 else
10335 wol->wolopts = 0;
10336 }
10337 memset(&wol->sopass, 0, sizeof(wol->sopass));
10338}
10339
10340static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10341{
10342 struct bnx2x *bp = netdev_priv(dev);
10343
10344 if (wol->wolopts & ~WAKE_MAGIC)
10345 return -EINVAL;
10346
10347 if (wol->wolopts & WAKE_MAGIC) {
10348 if (bp->flags & NO_WOL_FLAG)
10349 return -EINVAL;
10350
10351 bp->wol = 1;
34f80b04 10352 } else
a2fbb9ea 10353 bp->wol = 0;
34f80b04 10354
a2fbb9ea
ET
10355 return 0;
10356}
10357
10358static u32 bnx2x_get_msglevel(struct net_device *dev)
10359{
10360 struct bnx2x *bp = netdev_priv(dev);
10361
7995c64e 10362 return bp->msg_enable;
a2fbb9ea
ET
10363}
10364
10365static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10366{
10367 struct bnx2x *bp = netdev_priv(dev);
10368
10369 if (capable(CAP_NET_ADMIN))
7995c64e 10370 bp->msg_enable = level;
a2fbb9ea
ET
10371}
10372
10373static int bnx2x_nway_reset(struct net_device *dev)
10374{
10375 struct bnx2x *bp = netdev_priv(dev);
10376
34f80b04
EG
10377 if (!bp->port.pmf)
10378 return 0;
a2fbb9ea 10379
34f80b04 10380 if (netif_running(dev)) {
bb2a0f7a 10381 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10382 bnx2x_link_set(bp);
10383 }
a2fbb9ea
ET
10384
10385 return 0;
10386}
10387
ab6ad5a4 10388static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
10389{
10390 struct bnx2x *bp = netdev_priv(dev);
10391
f34d28ea
EG
10392 if (bp->flags & MF_FUNC_DIS)
10393 return 0;
10394
01e53298
NO
10395 return bp->link_vars.link_up;
10396}
10397
a2fbb9ea
ET
10398static int bnx2x_get_eeprom_len(struct net_device *dev)
10399{
10400 struct bnx2x *bp = netdev_priv(dev);
10401
34f80b04 10402 return bp->common.flash_size;
a2fbb9ea
ET
10403}
10404
10405static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10406{
34f80b04 10407 int port = BP_PORT(bp);
a2fbb9ea
ET
10408 int count, i;
10409 u32 val = 0;
10410
10411 /* adjust timeout for emulation/FPGA */
10412 count = NVRAM_TIMEOUT_COUNT;
10413 if (CHIP_REV_IS_SLOW(bp))
10414 count *= 100;
10415
10416 /* request access to nvram interface */
10417 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10418 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10419
10420 for (i = 0; i < count*10; i++) {
10421 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10422 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10423 break;
10424
10425 udelay(5);
10426 }
10427
10428 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 10429 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
10430 return -EBUSY;
10431 }
10432
10433 return 0;
10434}
10435
10436static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10437{
34f80b04 10438 int port = BP_PORT(bp);
a2fbb9ea
ET
10439 int count, i;
10440 u32 val = 0;
10441
10442 /* adjust timeout for emulation/FPGA */
10443 count = NVRAM_TIMEOUT_COUNT;
10444 if (CHIP_REV_IS_SLOW(bp))
10445 count *= 100;
10446
10447 /* relinquish nvram interface */
10448 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10449 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10450
10451 for (i = 0; i < count*10; i++) {
10452 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10453 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10454 break;
10455
10456 udelay(5);
10457 }
10458
10459 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 10460 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
10461 return -EBUSY;
10462 }
10463
10464 return 0;
10465}
10466
10467static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10468{
10469 u32 val;
10470
10471 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10472
10473 /* enable both bits, even on read */
10474 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10475 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10476 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10477}
10478
10479static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10480{
10481 u32 val;
10482
10483 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10484
10485 /* disable both bits, even after read */
10486 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10487 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10488 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10489}
10490
4781bfad 10491static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
10492 u32 cmd_flags)
10493{
f1410647 10494 int count, i, rc;
a2fbb9ea
ET
10495 u32 val;
10496
10497 /* build the command word */
10498 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10499
10500 /* need to clear DONE bit separately */
10501 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10502
10503 /* address of the NVRAM to read from */
10504 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10505 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10506
10507 /* issue a read command */
10508 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10509
10510 /* adjust timeout for emulation/FPGA */
10511 count = NVRAM_TIMEOUT_COUNT;
10512 if (CHIP_REV_IS_SLOW(bp))
10513 count *= 100;
10514
10515 /* wait for completion */
10516 *ret_val = 0;
10517 rc = -EBUSY;
10518 for (i = 0; i < count; i++) {
10519 udelay(5);
10520 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10521
10522 if (val & MCPR_NVM_COMMAND_DONE) {
10523 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
10524 /* we read nvram data in cpu order
10525 * but ethtool sees it as an array of bytes
10526 * converting to big-endian will do the work */
4781bfad 10527 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
10528 rc = 0;
10529 break;
10530 }
10531 }
10532
10533 return rc;
10534}
10535
10536static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10537 int buf_size)
10538{
10539 int rc;
10540 u32 cmd_flags;
4781bfad 10541 __be32 val;
a2fbb9ea
ET
10542
10543 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 10544 DP(BNX2X_MSG_NVM,
c14423fe 10545 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
10546 offset, buf_size);
10547 return -EINVAL;
10548 }
10549
34f80b04
EG
10550 if (offset + buf_size > bp->common.flash_size) {
10551 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 10552 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 10553 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
10554 return -EINVAL;
10555 }
10556
10557 /* request access to nvram interface */
10558 rc = bnx2x_acquire_nvram_lock(bp);
10559 if (rc)
10560 return rc;
10561
10562 /* enable access to nvram interface */
10563 bnx2x_enable_nvram_access(bp);
10564
10565 /* read the first word(s) */
10566 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10567 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10568 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10569 memcpy(ret_buf, &val, 4);
10570
10571 /* advance to the next dword */
10572 offset += sizeof(u32);
10573 ret_buf += sizeof(u32);
10574 buf_size -= sizeof(u32);
10575 cmd_flags = 0;
10576 }
10577
10578 if (rc == 0) {
10579 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10580 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10581 memcpy(ret_buf, &val, 4);
10582 }
10583
10584 /* disable access to nvram interface */
10585 bnx2x_disable_nvram_access(bp);
10586 bnx2x_release_nvram_lock(bp);
10587
10588 return rc;
10589}
10590
10591static int bnx2x_get_eeprom(struct net_device *dev,
10592 struct ethtool_eeprom *eeprom, u8 *eebuf)
10593{
10594 struct bnx2x *bp = netdev_priv(dev);
10595 int rc;
10596
2add3acb
EG
10597 if (!netif_running(dev))
10598 return -EAGAIN;
10599
34f80b04 10600 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
10601 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10602 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10603 eeprom->len, eeprom->len);
10604
10605 /* parameters already validated in ethtool_get_eeprom */
10606
10607 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10608
10609 return rc;
10610}
10611
10612static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10613 u32 cmd_flags)
10614{
f1410647 10615 int count, i, rc;
a2fbb9ea
ET
10616
10617 /* build the command word */
10618 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10619
10620 /* need to clear DONE bit separately */
10621 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10622
10623 /* write the data */
10624 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10625
10626 /* address of the NVRAM to write to */
10627 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10628 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10629
10630 /* issue the write command */
10631 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10632
10633 /* adjust timeout for emulation/FPGA */
10634 count = NVRAM_TIMEOUT_COUNT;
10635 if (CHIP_REV_IS_SLOW(bp))
10636 count *= 100;
10637
10638 /* wait for completion */
10639 rc = -EBUSY;
10640 for (i = 0; i < count; i++) {
10641 udelay(5);
10642 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10643 if (val & MCPR_NVM_COMMAND_DONE) {
10644 rc = 0;
10645 break;
10646 }
10647 }
10648
10649 return rc;
10650}
10651
f1410647 10652#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
10653
10654static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10655 int buf_size)
10656{
10657 int rc;
10658 u32 cmd_flags;
10659 u32 align_offset;
4781bfad 10660 __be32 val;
a2fbb9ea 10661
34f80b04
EG
10662 if (offset + buf_size > bp->common.flash_size) {
10663 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 10664 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 10665 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
10666 return -EINVAL;
10667 }
10668
10669 /* request access to nvram interface */
10670 rc = bnx2x_acquire_nvram_lock(bp);
10671 if (rc)
10672 return rc;
10673
10674 /* enable access to nvram interface */
10675 bnx2x_enable_nvram_access(bp);
10676
10677 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10678 align_offset = (offset & ~0x03);
10679 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10680
10681 if (rc == 0) {
10682 val &= ~(0xff << BYTE_OFFSET(offset));
10683 val |= (*data_buf << BYTE_OFFSET(offset));
10684
10685 /* nvram data is returned as an array of bytes
10686 * convert it back to cpu order */
10687 val = be32_to_cpu(val);
10688
a2fbb9ea
ET
10689 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10690 cmd_flags);
10691 }
10692
10693 /* disable access to nvram interface */
10694 bnx2x_disable_nvram_access(bp);
10695 bnx2x_release_nvram_lock(bp);
10696
10697 return rc;
10698}
10699
10700static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10701 int buf_size)
10702{
10703 int rc;
10704 u32 cmd_flags;
10705 u32 val;
10706 u32 written_so_far;
10707
34f80b04 10708 if (buf_size == 1) /* ethtool */
a2fbb9ea 10709 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
10710
10711 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 10712 DP(BNX2X_MSG_NVM,
c14423fe 10713 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
10714 offset, buf_size);
10715 return -EINVAL;
10716 }
10717
34f80b04
EG
10718 if (offset + buf_size > bp->common.flash_size) {
10719 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 10720 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 10721 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
10722 return -EINVAL;
10723 }
10724
10725 /* request access to nvram interface */
10726 rc = bnx2x_acquire_nvram_lock(bp);
10727 if (rc)
10728 return rc;
10729
10730 /* enable access to nvram interface */
10731 bnx2x_enable_nvram_access(bp);
10732
10733 written_so_far = 0;
10734 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10735 while ((written_so_far < buf_size) && (rc == 0)) {
10736 if (written_so_far == (buf_size - sizeof(u32)))
10737 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10738 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10739 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10740 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10741 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10742
10743 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
10744
10745 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10746
10747 /* advance to the next dword */
10748 offset += sizeof(u32);
10749 data_buf += sizeof(u32);
10750 written_so_far += sizeof(u32);
10751 cmd_flags = 0;
10752 }
10753
10754 /* disable access to nvram interface */
10755 bnx2x_disable_nvram_access(bp);
10756 bnx2x_release_nvram_lock(bp);
10757
10758 return rc;
10759}
10760
10761static int bnx2x_set_eeprom(struct net_device *dev,
10762 struct ethtool_eeprom *eeprom, u8 *eebuf)
10763{
10764 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
10765 int port = BP_PORT(bp);
10766 int rc = 0;
a2fbb9ea 10767
9f4c9583
EG
10768 if (!netif_running(dev))
10769 return -EAGAIN;
10770
34f80b04 10771 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
10772 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10773 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10774 eeprom->len, eeprom->len);
10775
10776 /* parameters already validated in ethtool_set_eeprom */
10777
f57a6025
EG
10778 /* PHY eeprom can be accessed only by the PMF */
10779 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10780 !bp->port.pmf)
10781 return -EINVAL;
10782
10783 if (eeprom->magic == 0x50485950) {
10784 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10785 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 10786
f57a6025
EG
10787 bnx2x_acquire_phy_lock(bp);
10788 rc |= bnx2x_link_reset(&bp->link_params,
10789 &bp->link_vars, 0);
10790 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10791 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10792 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10793 MISC_REGISTERS_GPIO_HIGH, port);
10794 bnx2x_release_phy_lock(bp);
10795 bnx2x_link_report(bp);
10796
10797 } else if (eeprom->magic == 0x50485952) {
10798 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 10799 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 10800 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
10801 rc |= bnx2x_link_reset(&bp->link_params,
10802 &bp->link_vars, 1);
10803
10804 rc |= bnx2x_phy_init(&bp->link_params,
10805 &bp->link_vars);
4a37fb66 10806 bnx2x_release_phy_lock(bp);
f57a6025
EG
10807 bnx2x_calc_fc_adv(bp);
10808 }
10809 } else if (eeprom->magic == 0x53985943) {
10810 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10811 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10812 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10813 u8 ext_phy_addr =
659bc5c4 10814 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
10815
10816 /* DSP Remove Download Mode */
10817 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10818 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 10819
f57a6025
EG
10820 bnx2x_acquire_phy_lock(bp);
10821
10822 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10823
10824 /* wait 0.5 sec to allow it to run */
10825 msleep(500);
10826 bnx2x_ext_phy_hw_reset(bp, port);
10827 msleep(500);
10828 bnx2x_release_phy_lock(bp);
10829 }
10830 } else
c18487ee 10831 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
10832
10833 return rc;
10834}
10835
10836static int bnx2x_get_coalesce(struct net_device *dev,
10837 struct ethtool_coalesce *coal)
10838{
10839 struct bnx2x *bp = netdev_priv(dev);
10840
10841 memset(coal, 0, sizeof(struct ethtool_coalesce));
10842
10843 coal->rx_coalesce_usecs = bp->rx_ticks;
10844 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
10845
10846 return 0;
10847}
10848
10849static int bnx2x_set_coalesce(struct net_device *dev,
10850 struct ethtool_coalesce *coal)
10851{
10852 struct bnx2x *bp = netdev_priv(dev);
10853
cdaa7cb8
VZ
10854 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10855 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10856 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 10857
cdaa7cb8
VZ
10858 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10859 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10860 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 10861
34f80b04 10862 if (netif_running(dev))
a2fbb9ea
ET
10863 bnx2x_update_coalesce(bp);
10864
10865 return 0;
10866}
10867
10868static void bnx2x_get_ringparam(struct net_device *dev,
10869 struct ethtool_ringparam *ering)
10870{
10871 struct bnx2x *bp = netdev_priv(dev);
10872
10873 ering->rx_max_pending = MAX_RX_AVAIL;
10874 ering->rx_mini_max_pending = 0;
10875 ering->rx_jumbo_max_pending = 0;
10876
10877 ering->rx_pending = bp->rx_ring_size;
10878 ering->rx_mini_pending = 0;
10879 ering->rx_jumbo_pending = 0;
10880
10881 ering->tx_max_pending = MAX_TX_AVAIL;
10882 ering->tx_pending = bp->tx_ring_size;
10883}
10884
10885static int bnx2x_set_ringparam(struct net_device *dev,
10886 struct ethtool_ringparam *ering)
10887{
10888 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10889 int rc = 0;
a2fbb9ea 10890
72fd0718
VZ
10891 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10892 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10893 return -EAGAIN;
10894 }
10895
a2fbb9ea
ET
10896 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10897 (ering->tx_pending > MAX_TX_AVAIL) ||
10898 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10899 return -EINVAL;
10900
10901 bp->rx_ring_size = ering->rx_pending;
10902 bp->tx_ring_size = ering->tx_pending;
10903
34f80b04
EG
10904 if (netif_running(dev)) {
10905 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10906 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
10907 }
10908
34f80b04 10909 return rc;
a2fbb9ea
ET
10910}
10911
10912static void bnx2x_get_pauseparam(struct net_device *dev,
10913 struct ethtool_pauseparam *epause)
10914{
10915 struct bnx2x *bp = netdev_priv(dev);
10916
356e2385
EG
10917 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10918 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
10919 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10920
c0700f90
DM
10921 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10922 BNX2X_FLOW_CTRL_RX);
10923 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10924 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
10925
10926 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10927 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10928 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10929}
10930
10931static int bnx2x_set_pauseparam(struct net_device *dev,
10932 struct ethtool_pauseparam *epause)
10933{
10934 struct bnx2x *bp = netdev_priv(dev);
10935
34f80b04
EG
10936 if (IS_E1HMF(bp))
10937 return 0;
10938
a2fbb9ea
ET
10939 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10940 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10941 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10942
c0700f90 10943 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 10944
f1410647 10945 if (epause->rx_pause)
c0700f90 10946 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 10947
f1410647 10948 if (epause->tx_pause)
c0700f90 10949 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 10950
c0700f90
DM
10951 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10952 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 10953
c18487ee 10954 if (epause->autoneg) {
34f80b04 10955 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 10956 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
10957 return -EINVAL;
10958 }
a2fbb9ea 10959
c18487ee 10960 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 10961 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 10962 }
a2fbb9ea 10963
c18487ee
YR
10964 DP(NETIF_MSG_LINK,
10965 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
10966
10967 if (netif_running(dev)) {
bb2a0f7a 10968 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10969 bnx2x_link_set(bp);
10970 }
a2fbb9ea
ET
10971
10972 return 0;
10973}
10974
df0f2343
VZ
10975static int bnx2x_set_flags(struct net_device *dev, u32 data)
10976{
10977 struct bnx2x *bp = netdev_priv(dev);
10978 int changed = 0;
10979 int rc = 0;
10980
72fd0718
VZ
10981 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10982 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10983 return -EAGAIN;
10984 }
10985
df0f2343
VZ
10986 /* TPA requires Rx CSUM offloading */
10987 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
d43a7e67
VZ
10988 if (!disable_tpa) {
10989 if (!(dev->features & NETIF_F_LRO)) {
10990 dev->features |= NETIF_F_LRO;
10991 bp->flags |= TPA_ENABLE_FLAG;
10992 changed = 1;
10993 }
10994 } else
10995 rc = -EINVAL;
df0f2343
VZ
10996 } else if (dev->features & NETIF_F_LRO) {
10997 dev->features &= ~NETIF_F_LRO;
10998 bp->flags &= ~TPA_ENABLE_FLAG;
10999 changed = 1;
11000 }
11001
11002 if (changed && netif_running(dev)) {
11003 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11004 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11005 }
11006
11007 return rc;
11008}
11009
a2fbb9ea
ET
11010static u32 bnx2x_get_rx_csum(struct net_device *dev)
11011{
11012 struct bnx2x *bp = netdev_priv(dev);
11013
11014 return bp->rx_csum;
11015}
11016
11017static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11018{
11019 struct bnx2x *bp = netdev_priv(dev);
df0f2343 11020 int rc = 0;
a2fbb9ea 11021
72fd0718
VZ
11022 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11023 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11024 return -EAGAIN;
11025 }
11026
a2fbb9ea 11027 bp->rx_csum = data;
df0f2343
VZ
11028
11029 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
11030 TPA'ed packets will be discarded due to wrong TCP CSUM */
11031 if (!data) {
11032 u32 flags = ethtool_op_get_flags(dev);
11033
11034 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11035 }
11036
11037 return rc;
a2fbb9ea
ET
11038}
11039
11040static int bnx2x_set_tso(struct net_device *dev, u32 data)
11041{
755735eb 11042 if (data) {
a2fbb9ea 11043 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
11044 dev->features |= NETIF_F_TSO6;
11045 } else {
a2fbb9ea 11046 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
11047 dev->features &= ~NETIF_F_TSO6;
11048 }
11049
a2fbb9ea
ET
11050 return 0;
11051}
11052
f3c87cdd 11053static const struct {
a2fbb9ea
ET
11054 char string[ETH_GSTRING_LEN];
11055} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
11056 { "register_test (offline)" },
11057 { "memory_test (offline)" },
11058 { "loopback_test (offline)" },
11059 { "nvram_test (online)" },
11060 { "interrupt_test (online)" },
11061 { "link_test (online)" },
d3d4f495 11062 { "idle check (online)" }
a2fbb9ea
ET
11063};
11064
f3c87cdd
YG
11065static int bnx2x_test_registers(struct bnx2x *bp)
11066{
11067 int idx, i, rc = -ENODEV;
11068 u32 wr_val = 0;
9dabc424 11069 int port = BP_PORT(bp);
f3c87cdd 11070 static const struct {
cdaa7cb8
VZ
11071 u32 offset0;
11072 u32 offset1;
11073 u32 mask;
f3c87cdd
YG
11074 } reg_tbl[] = {
11075/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
11076 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
11077 { HC_REG_AGG_INT_0, 4, 0x000003ff },
11078 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
11079 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
11080 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
11081 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
11082 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11083 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
11084 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
11085/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
11086 { QM_REG_CONNNUM_0, 4, 0x000fffff },
11087 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
11088 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
11089 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
11090 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11091 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
11092 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 11093 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
11094 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
11095/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
11096 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
11097 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
11098 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
11099 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
11100 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
11101 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
11102 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
11103 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
11104 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
11105/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
11106 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
11107 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
11108 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11109 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
11110 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11111 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
11112
11113 { 0xffffffff, 0, 0x00000000 }
11114 };
11115
11116 if (!netif_running(bp->dev))
11117 return rc;
11118
11119 /* Repeat the test twice:
11120 First by writing 0x00000000, second by writing 0xffffffff */
11121 for (idx = 0; idx < 2; idx++) {
11122
11123 switch (idx) {
11124 case 0:
11125 wr_val = 0;
11126 break;
11127 case 1:
11128 wr_val = 0xffffffff;
11129 break;
11130 }
11131
11132 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11133 u32 offset, mask, save_val, val;
f3c87cdd
YG
11134
11135 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11136 mask = reg_tbl[i].mask;
11137
11138 save_val = REG_RD(bp, offset);
11139
8eb5a20c 11140 REG_WR(bp, offset, (wr_val & mask));
f3c87cdd
YG
11141 val = REG_RD(bp, offset);
11142
11143 /* Restore the original register's value */
11144 REG_WR(bp, offset, save_val);
11145
cdaa7cb8
VZ
11146 /* verify value is as expected */
11147 if ((val & mask) != (wr_val & mask)) {
11148 DP(NETIF_MSG_PROBE,
11149 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11150 offset, val, wr_val, mask);
f3c87cdd 11151 goto test_reg_exit;
cdaa7cb8 11152 }
f3c87cdd
YG
11153 }
11154 }
11155
11156 rc = 0;
11157
11158test_reg_exit:
11159 return rc;
11160}
11161
11162static int bnx2x_test_memory(struct bnx2x *bp)
11163{
11164 int i, j, rc = -ENODEV;
11165 u32 val;
11166 static const struct {
11167 u32 offset;
11168 int size;
11169 } mem_tbl[] = {
11170 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
11171 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11172 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
11173 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
11174 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
11175 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
11176 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
11177
11178 { 0xffffffff, 0 }
11179 };
11180 static const struct {
11181 char *name;
11182 u32 offset;
9dabc424
YG
11183 u32 e1_mask;
11184 u32 e1h_mask;
f3c87cdd 11185 } prty_tbl[] = {
9dabc424
YG
11186 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
11187 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
11188 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
11189 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
11190 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
11191 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
11192
11193 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
11194 };
11195
11196 if (!netif_running(bp->dev))
11197 return rc;
11198
11199 /* Go through all the memories */
11200 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11201 for (j = 0; j < mem_tbl[i].size; j++)
11202 REG_RD(bp, mem_tbl[i].offset + j*4);
11203
11204 /* Check the parity status */
11205 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11206 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
11207 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11208 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
11209 DP(NETIF_MSG_HW,
11210 "%s is 0x%x\n", prty_tbl[i].name, val);
11211 goto test_mem_exit;
11212 }
11213 }
11214
11215 rc = 0;
11216
11217test_mem_exit:
11218 return rc;
11219}
11220
f3c87cdd
YG
11221static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11222{
11223 int cnt = 1000;
11224
11225 if (link_up)
11226 while (bnx2x_link_test(bp) && cnt--)
11227 msleep(10);
11228}
11229
11230static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11231{
11232 unsigned int pkt_size, num_pkts, i;
11233 struct sk_buff *skb;
11234 unsigned char *packet;
ca00392c 11235 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
54b9ddaa 11236 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
f3c87cdd
YG
11237 u16 tx_start_idx, tx_idx;
11238 u16 rx_start_idx, rx_idx;
ca00392c 11239 u16 pkt_prod, bd_prod;
f3c87cdd 11240 struct sw_tx_bd *tx_buf;
ca00392c
EG
11241 struct eth_tx_start_bd *tx_start_bd;
11242 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
11243 dma_addr_t mapping;
11244 union eth_rx_cqe *cqe;
11245 u8 cqe_fp_flags;
11246 struct sw_rx_bd *rx_buf;
11247 u16 len;
11248 int rc = -ENODEV;
11249
b5bf9068
EG
11250 /* check the loopback mode */
11251 switch (loopback_mode) {
11252 case BNX2X_PHY_LOOPBACK:
11253 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11254 return -EINVAL;
11255 break;
11256 case BNX2X_MAC_LOOPBACK:
f3c87cdd 11257 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 11258 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
11259 break;
11260 default:
f3c87cdd 11261 return -EINVAL;
b5bf9068 11262 }
f3c87cdd 11263
b5bf9068
EG
11264 /* prepare the loopback packet */
11265 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11266 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
11267 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11268 if (!skb) {
11269 rc = -ENOMEM;
11270 goto test_loopback_exit;
11271 }
11272 packet = skb_put(skb, pkt_size);
11273 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
11274 memset(packet + ETH_ALEN, 0, ETH_ALEN);
11275 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
11276 for (i = ETH_HLEN; i < pkt_size; i++)
11277 packet[i] = (unsigned char) (i & 0xff);
11278
b5bf9068 11279 /* send the loopback packet */
f3c87cdd 11280 num_pkts = 0;
ca00392c
EG
11281 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11282 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 11283
ca00392c
EG
11284 pkt_prod = fp_tx->tx_pkt_prod++;
11285 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11286 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 11287 tx_buf->skb = skb;
ca00392c 11288 tx_buf->flags = 0;
f3c87cdd 11289
ca00392c
EG
11290 bd_prod = TX_BD(fp_tx->tx_bd_prod);
11291 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
1a983142
FT
11292 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11293 skb_headlen(skb), DMA_TO_DEVICE);
ca00392c
EG
11294 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11295 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11296 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11297 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11298 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11299 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11300 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11301 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11302
11303 /* turn on parsing and get a BD */
11304 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11305 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11306
11307 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 11308
58f4c4cf
EG
11309 wmb();
11310
ca00392c
EG
11311 fp_tx->tx_db.data.prod += 2;
11312 barrier();
54b9ddaa 11313 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
f3c87cdd
YG
11314
11315 mmiowb();
11316
11317 num_pkts++;
ca00392c 11318 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
11319
11320 udelay(100);
11321
ca00392c 11322 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
11323 if (tx_idx != tx_start_idx + num_pkts)
11324 goto test_loopback_exit;
11325
ca00392c 11326 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
11327 if (rx_idx != rx_start_idx + num_pkts)
11328 goto test_loopback_exit;
11329
ca00392c 11330 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
11331 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11332 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11333 goto test_loopback_rx_exit;
11334
11335 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11336 if (len != pkt_size)
11337 goto test_loopback_rx_exit;
11338
ca00392c 11339 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
11340 skb = rx_buf->skb;
11341 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11342 for (i = ETH_HLEN; i < pkt_size; i++)
11343 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11344 goto test_loopback_rx_exit;
11345
11346 rc = 0;
11347
11348test_loopback_rx_exit:
f3c87cdd 11349
ca00392c
EG
11350 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11351 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11352 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11353 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
11354
11355 /* Update producers */
ca00392c
EG
11356 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11357 fp_rx->rx_sge_prod);
f3c87cdd
YG
11358
11359test_loopback_exit:
11360 bp->link_params.loopback_mode = LOOPBACK_NONE;
11361
11362 return rc;
11363}
11364
11365static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11366{
b5bf9068 11367 int rc = 0, res;
f3c87cdd 11368
2145a920
VZ
11369 if (BP_NOMCP(bp))
11370 return rc;
11371
f3c87cdd
YG
11372 if (!netif_running(bp->dev))
11373 return BNX2X_LOOPBACK_FAILED;
11374
f8ef6e44 11375 bnx2x_netif_stop(bp, 1);
3910c8ae 11376 bnx2x_acquire_phy_lock(bp);
f3c87cdd 11377
b5bf9068
EG
11378 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11379 if (res) {
11380 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11381 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
11382 }
11383
b5bf9068
EG
11384 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11385 if (res) {
11386 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11387 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
11388 }
11389
3910c8ae 11390 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
11391 bnx2x_netif_start(bp);
11392
11393 return rc;
11394}
11395
11396#define CRC32_RESIDUAL 0xdebb20e3
11397
11398static int bnx2x_test_nvram(struct bnx2x *bp)
11399{
11400 static const struct {
11401 int offset;
11402 int size;
11403 } nvram_tbl[] = {
11404 { 0, 0x14 }, /* bootstrap */
11405 { 0x14, 0xec }, /* dir */
11406 { 0x100, 0x350 }, /* manuf_info */
11407 { 0x450, 0xf0 }, /* feature_info */
11408 { 0x640, 0x64 }, /* upgrade_key_info */
11409 { 0x6a4, 0x64 },
11410 { 0x708, 0x70 }, /* manuf_key_info */
11411 { 0x778, 0x70 },
11412 { 0, 0 }
11413 };
4781bfad 11414 __be32 buf[0x350 / 4];
f3c87cdd
YG
11415 u8 *data = (u8 *)buf;
11416 int i, rc;
ab6ad5a4 11417 u32 magic, crc;
f3c87cdd 11418
2145a920
VZ
11419 if (BP_NOMCP(bp))
11420 return 0;
11421
f3c87cdd
YG
11422 rc = bnx2x_nvram_read(bp, 0, data, 4);
11423 if (rc) {
f5372251 11424 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
11425 goto test_nvram_exit;
11426 }
11427
11428 magic = be32_to_cpu(buf[0]);
11429 if (magic != 0x669955aa) {
11430 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11431 rc = -ENODEV;
11432 goto test_nvram_exit;
11433 }
11434
11435 for (i = 0; nvram_tbl[i].size; i++) {
11436
11437 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11438 nvram_tbl[i].size);
11439 if (rc) {
11440 DP(NETIF_MSG_PROBE,
f5372251 11441 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
11442 goto test_nvram_exit;
11443 }
11444
ab6ad5a4
EG
11445 crc = ether_crc_le(nvram_tbl[i].size, data);
11446 if (crc != CRC32_RESIDUAL) {
f3c87cdd 11447 DP(NETIF_MSG_PROBE,
ab6ad5a4 11448 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
11449 rc = -ENODEV;
11450 goto test_nvram_exit;
11451 }
11452 }
11453
11454test_nvram_exit:
11455 return rc;
11456}
11457
11458static int bnx2x_test_intr(struct bnx2x *bp)
11459{
11460 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11461 int i, rc;
11462
11463 if (!netif_running(bp->dev))
11464 return -ENODEV;
11465
8d9c5f34 11466 config->hdr.length = 0;
af246401 11467 if (CHIP_IS_E1(bp))
0c43f43f
VZ
11468 /* use last unicast entries */
11469 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
af246401
EG
11470 else
11471 config->hdr.offset = BP_FUNC(bp);
0626b899 11472 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
11473 config->hdr.reserved1 = 0;
11474
e665bfda
MC
11475 bp->set_mac_pending++;
11476 smp_wmb();
f3c87cdd
YG
11477 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11478 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11479 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11480 if (rc == 0) {
f3c87cdd
YG
11481 for (i = 0; i < 10; i++) {
11482 if (!bp->set_mac_pending)
11483 break;
e665bfda 11484 smp_rmb();
f3c87cdd
YG
11485 msleep_interruptible(10);
11486 }
11487 if (i == 10)
11488 rc = -ENODEV;
11489 }
11490
11491 return rc;
11492}
11493
a2fbb9ea
ET
11494static void bnx2x_self_test(struct net_device *dev,
11495 struct ethtool_test *etest, u64 *buf)
11496{
11497 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea 11498
72fd0718
VZ
11499 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11500 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11501 etest->flags |= ETH_TEST_FL_FAILED;
11502 return;
11503 }
11504
a2fbb9ea
ET
11505 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11506
f3c87cdd 11507 if (!netif_running(dev))
a2fbb9ea 11508 return;
a2fbb9ea 11509
33471629 11510 /* offline tests are not supported in MF mode */
f3c87cdd
YG
11511 if (IS_E1HMF(bp))
11512 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11513
11514 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
11515 int port = BP_PORT(bp);
11516 u32 val;
f3c87cdd
YG
11517 u8 link_up;
11518
279abdf5
EG
11519 /* save current value of input enable for TX port IF */
11520 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11521 /* disable input for TX port IF */
11522 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11523
061bc702 11524 link_up = (bnx2x_link_test(bp) == 0);
f3c87cdd
YG
11525 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11526 bnx2x_nic_load(bp, LOAD_DIAG);
11527 /* wait until link state is restored */
11528 bnx2x_wait_for_link(bp, link_up);
11529
11530 if (bnx2x_test_registers(bp) != 0) {
11531 buf[0] = 1;
11532 etest->flags |= ETH_TEST_FL_FAILED;
11533 }
11534 if (bnx2x_test_memory(bp) != 0) {
11535 buf[1] = 1;
11536 etest->flags |= ETH_TEST_FL_FAILED;
11537 }
11538 buf[2] = bnx2x_test_loopback(bp, link_up);
11539 if (buf[2] != 0)
11540 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 11541
f3c87cdd 11542 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
11543
11544 /* restore input for TX port IF */
11545 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11546
f3c87cdd
YG
11547 bnx2x_nic_load(bp, LOAD_NORMAL);
11548 /* wait until link state is restored */
11549 bnx2x_wait_for_link(bp, link_up);
11550 }
11551 if (bnx2x_test_nvram(bp) != 0) {
11552 buf[3] = 1;
a2fbb9ea
ET
11553 etest->flags |= ETH_TEST_FL_FAILED;
11554 }
f3c87cdd
YG
11555 if (bnx2x_test_intr(bp) != 0) {
11556 buf[4] = 1;
11557 etest->flags |= ETH_TEST_FL_FAILED;
11558 }
11559 if (bp->port.pmf)
11560 if (bnx2x_link_test(bp) != 0) {
11561 buf[5] = 1;
11562 etest->flags |= ETH_TEST_FL_FAILED;
11563 }
f3c87cdd
YG
11564
11565#ifdef BNX2X_EXTRA_DEBUG
11566 bnx2x_panic_dump(bp);
11567#endif
a2fbb9ea
ET
11568}
11569
de832a55
EG
11570static const struct {
11571 long offset;
11572 int size;
11573 u8 string[ETH_GSTRING_LEN];
11574} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11575/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11576 { Q_STATS_OFFSET32(error_bytes_received_hi),
11577 8, "[%d]: rx_error_bytes" },
11578 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11579 8, "[%d]: rx_ucast_packets" },
11580 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11581 8, "[%d]: rx_mcast_packets" },
11582 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11583 8, "[%d]: rx_bcast_packets" },
11584 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11585 { Q_STATS_OFFSET32(rx_err_discard_pkt),
11586 4, "[%d]: rx_phy_ip_err_discards"},
11587 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11588 4, "[%d]: rx_skb_alloc_discard" },
11589 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11590
11591/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11592 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
dea7aab1
VZ
11593 8, "[%d]: tx_ucast_packets" },
11594 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11595 8, "[%d]: tx_mcast_packets" },
11596 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11597 8, "[%d]: tx_bcast_packets" }
de832a55
EG
11598};
11599
bb2a0f7a
YG
11600static const struct {
11601 long offset;
11602 int size;
11603 u32 flags;
66e855f3
YG
11604#define STATS_FLAGS_PORT 1
11605#define STATS_FLAGS_FUNC 2
de832a55 11606#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 11607 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 11608} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
11609/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11610 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 11611 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 11612 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 11613 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 11614 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 11615 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 11616 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 11617 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 11618 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 11619 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 11620 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 11621 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 11622 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
11623 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11624 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11625 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11626 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11627/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11628 8, STATS_FLAGS_PORT, "rx_fragments" },
11629 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11630 8, STATS_FLAGS_PORT, "rx_jabbers" },
11631 { STATS_OFFSET32(no_buff_discard_hi),
11632 8, STATS_FLAGS_BOTH, "rx_discards" },
11633 { STATS_OFFSET32(mac_filter_discard),
11634 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11635 { STATS_OFFSET32(xxoverflow_discard),
11636 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11637 { STATS_OFFSET32(brb_drop_hi),
11638 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11639 { STATS_OFFSET32(brb_truncate_hi),
11640 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11641 { STATS_OFFSET32(pause_frames_received_hi),
11642 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11643 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11644 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11645 { STATS_OFFSET32(nig_timer_max),
11646 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11647/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11648 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11649 { STATS_OFFSET32(rx_skb_alloc_failed),
11650 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11651 { STATS_OFFSET32(hw_csum_err),
11652 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11653
11654 { STATS_OFFSET32(total_bytes_transmitted_hi),
11655 8, STATS_FLAGS_BOTH, "tx_bytes" },
11656 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11657 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11658 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
dea7aab1
VZ
11659 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11660 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11661 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11662 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11663 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
de832a55
EG
11664 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11665 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11666 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11667 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
dea7aab1 11668/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 11669 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 11670 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 11671 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
dea7aab1 11672 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 11673 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 11674 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 11675 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 11676 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 11677 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 11678 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 11679 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 11680 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 11681 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 11682 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 11683 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 11684 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 11685 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 11686 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 11687 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
dea7aab1 11688/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 11689 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 11690 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 11691 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
dea7aab1 11692 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 11693 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
11694 { STATS_OFFSET32(pause_frames_sent_hi),
11695 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
11696};
11697
de832a55
EG
11698#define IS_PORT_STAT(i) \
11699 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11700#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11701#define IS_E1HMF_MODE_STAT(bp) \
7995c64e 11702 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
66e855f3 11703
15f0a394
BH
11704static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11705{
11706 struct bnx2x *bp = netdev_priv(dev);
11707 int i, num_stats;
11708
cdaa7cb8 11709 switch (stringset) {
15f0a394
BH
11710 case ETH_SS_STATS:
11711 if (is_multi(bp)) {
54b9ddaa 11712 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
15f0a394
BH
11713 if (!IS_E1HMF_MODE_STAT(bp))
11714 num_stats += BNX2X_NUM_STATS;
11715 } else {
11716 if (IS_E1HMF_MODE_STAT(bp)) {
11717 num_stats = 0;
11718 for (i = 0; i < BNX2X_NUM_STATS; i++)
11719 if (IS_FUNC_STAT(i))
11720 num_stats++;
11721 } else
11722 num_stats = BNX2X_NUM_STATS;
11723 }
11724 return num_stats;
11725
11726 case ETH_SS_TEST:
11727 return BNX2X_NUM_TESTS;
11728
11729 default:
11730 return -EINVAL;
11731 }
11732}
11733
a2fbb9ea
ET
11734static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11735{
bb2a0f7a 11736 struct bnx2x *bp = netdev_priv(dev);
de832a55 11737 int i, j, k;
bb2a0f7a 11738
a2fbb9ea
ET
11739 switch (stringset) {
11740 case ETH_SS_STATS:
de832a55
EG
11741 if (is_multi(bp)) {
11742 k = 0;
54b9ddaa 11743 for_each_queue(bp, i) {
de832a55
EG
11744 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11745 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11746 bnx2x_q_stats_arr[j].string, i);
11747 k += BNX2X_NUM_Q_STATS;
11748 }
11749 if (IS_E1HMF_MODE_STAT(bp))
11750 break;
11751 for (j = 0; j < BNX2X_NUM_STATS; j++)
11752 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11753 bnx2x_stats_arr[j].string);
11754 } else {
11755 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11756 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11757 continue;
11758 strcpy(buf + j*ETH_GSTRING_LEN,
11759 bnx2x_stats_arr[i].string);
11760 j++;
11761 }
bb2a0f7a 11762 }
a2fbb9ea
ET
11763 break;
11764
11765 case ETH_SS_TEST:
11766 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11767 break;
11768 }
11769}
11770
a2fbb9ea
ET
11771static void bnx2x_get_ethtool_stats(struct net_device *dev,
11772 struct ethtool_stats *stats, u64 *buf)
11773{
11774 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
11775 u32 *hw_stats, *offset;
11776 int i, j, k;
bb2a0f7a 11777
de832a55
EG
11778 if (is_multi(bp)) {
11779 k = 0;
54b9ddaa 11780 for_each_queue(bp, i) {
de832a55
EG
11781 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11782 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11783 if (bnx2x_q_stats_arr[j].size == 0) {
11784 /* skip this counter */
11785 buf[k + j] = 0;
11786 continue;
11787 }
11788 offset = (hw_stats +
11789 bnx2x_q_stats_arr[j].offset);
11790 if (bnx2x_q_stats_arr[j].size == 4) {
11791 /* 4-byte counter */
11792 buf[k + j] = (u64) *offset;
11793 continue;
11794 }
11795 /* 8-byte counter */
11796 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11797 }
11798 k += BNX2X_NUM_Q_STATS;
11799 }
11800 if (IS_E1HMF_MODE_STAT(bp))
11801 return;
11802 hw_stats = (u32 *)&bp->eth_stats;
11803 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11804 if (bnx2x_stats_arr[j].size == 0) {
11805 /* skip this counter */
11806 buf[k + j] = 0;
11807 continue;
11808 }
11809 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11810 if (bnx2x_stats_arr[j].size == 4) {
11811 /* 4-byte counter */
11812 buf[k + j] = (u64) *offset;
11813 continue;
11814 }
11815 /* 8-byte counter */
11816 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 11817 }
de832a55
EG
11818 } else {
11819 hw_stats = (u32 *)&bp->eth_stats;
11820 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11821 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11822 continue;
11823 if (bnx2x_stats_arr[i].size == 0) {
11824 /* skip this counter */
11825 buf[j] = 0;
11826 j++;
11827 continue;
11828 }
11829 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11830 if (bnx2x_stats_arr[i].size == 4) {
11831 /* 4-byte counter */
11832 buf[j] = (u64) *offset;
11833 j++;
11834 continue;
11835 }
11836 /* 8-byte counter */
11837 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 11838 j++;
a2fbb9ea 11839 }
a2fbb9ea
ET
11840 }
11841}
11842
11843static int bnx2x_phys_id(struct net_device *dev, u32 data)
11844{
11845 struct bnx2x *bp = netdev_priv(dev);
11846 int i;
11847
34f80b04
EG
11848 if (!netif_running(dev))
11849 return 0;
11850
11851 if (!bp->port.pmf)
11852 return 0;
11853
a2fbb9ea
ET
11854 if (data == 0)
11855 data = 2;
11856
11857 for (i = 0; i < (data * 2); i++) {
c18487ee 11858 if ((i % 2) == 0)
7846e471
YR
11859 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11860 SPEED_1000);
c18487ee 11861 else
7846e471 11862 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
c18487ee 11863
a2fbb9ea
ET
11864 msleep_interruptible(500);
11865 if (signal_pending(current))
11866 break;
11867 }
11868
c18487ee 11869 if (bp->link_vars.link_up)
7846e471
YR
11870 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11871 bp->link_vars.line_speed);
a2fbb9ea
ET
11872
11873 return 0;
11874}
11875
0fc0b732 11876static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
11877 .get_settings = bnx2x_get_settings,
11878 .set_settings = bnx2x_set_settings,
11879 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
11880 .get_regs_len = bnx2x_get_regs_len,
11881 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
11882 .get_wol = bnx2x_get_wol,
11883 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
11884 .get_msglevel = bnx2x_get_msglevel,
11885 .set_msglevel = bnx2x_set_msglevel,
11886 .nway_reset = bnx2x_nway_reset,
01e53298 11887 .get_link = bnx2x_get_link,
7a9b2557
VZ
11888 .get_eeprom_len = bnx2x_get_eeprom_len,
11889 .get_eeprom = bnx2x_get_eeprom,
11890 .set_eeprom = bnx2x_set_eeprom,
11891 .get_coalesce = bnx2x_get_coalesce,
11892 .set_coalesce = bnx2x_set_coalesce,
11893 .get_ringparam = bnx2x_get_ringparam,
11894 .set_ringparam = bnx2x_set_ringparam,
11895 .get_pauseparam = bnx2x_get_pauseparam,
11896 .set_pauseparam = bnx2x_set_pauseparam,
11897 .get_rx_csum = bnx2x_get_rx_csum,
11898 .set_rx_csum = bnx2x_set_rx_csum,
11899 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 11900 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
11901 .set_flags = bnx2x_set_flags,
11902 .get_flags = ethtool_op_get_flags,
11903 .get_sg = ethtool_op_get_sg,
11904 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
11905 .get_tso = ethtool_op_get_tso,
11906 .set_tso = bnx2x_set_tso,
7a9b2557 11907 .self_test = bnx2x_self_test,
15f0a394 11908 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 11909 .get_strings = bnx2x_get_strings,
a2fbb9ea 11910 .phys_id = bnx2x_phys_id,
bb2a0f7a 11911 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
11912};
11913
11914/* end of ethtool_ops */
11915
11916/****************************************************************************
11917* General service functions
11918****************************************************************************/
11919
11920static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11921{
11922 u16 pmcsr;
11923
11924 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11925
11926 switch (state) {
11927 case PCI_D0:
34f80b04 11928 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
11929 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11930 PCI_PM_CTRL_PME_STATUS));
11931
11932 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 11933 /* delay required during transition out of D3hot */
a2fbb9ea 11934 msleep(20);
34f80b04 11935 break;
a2fbb9ea 11936
34f80b04 11937 case PCI_D3hot:
d3dbfee0
VZ
11938 /* If there are other clients above don't
11939 shut down the power */
11940 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11941 return 0;
11942 /* Don't shut down the power for emulation and FPGA */
11943 if (CHIP_REV_IS_SLOW(bp))
11944 return 0;
11945
34f80b04
EG
11946 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11947 pmcsr |= 3;
a2fbb9ea 11948
34f80b04
EG
11949 if (bp->wol)
11950 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 11951
34f80b04
EG
11952 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11953 pmcsr);
a2fbb9ea 11954
34f80b04
EG
11955 /* No more memory access after this point until
11956 * device is brought back to D0.
11957 */
11958 break;
11959
11960 default:
11961 return -EINVAL;
11962 }
11963 return 0;
a2fbb9ea
ET
11964}
11965
237907c1
EG
11966static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11967{
11968 u16 rx_cons_sb;
11969
11970 /* Tell compiler that status block fields can change */
11971 barrier();
11972 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11973 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11974 rx_cons_sb++;
11975 return (fp->rx_comp_cons != rx_cons_sb);
11976}
11977
34f80b04
EG
11978/*
11979 * net_device service functions
11980 */
11981
a2fbb9ea
ET
11982static int bnx2x_poll(struct napi_struct *napi, int budget)
11983{
54b9ddaa 11984 int work_done = 0;
a2fbb9ea
ET
11985 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11986 napi);
11987 struct bnx2x *bp = fp->bp;
a2fbb9ea 11988
54b9ddaa 11989 while (1) {
a2fbb9ea 11990#ifdef BNX2X_STOP_ON_ERROR
54b9ddaa
VZ
11991 if (unlikely(bp->panic)) {
11992 napi_complete(napi);
11993 return 0;
11994 }
a2fbb9ea
ET
11995#endif
11996
54b9ddaa
VZ
11997 if (bnx2x_has_tx_work(fp))
11998 bnx2x_tx_int(fp);
356e2385 11999
54b9ddaa
VZ
12000 if (bnx2x_has_rx_work(fp)) {
12001 work_done += bnx2x_rx_int(fp, budget - work_done);
a2fbb9ea 12002
54b9ddaa
VZ
12003 /* must not complete if we consumed full budget */
12004 if (work_done >= budget)
12005 break;
12006 }
a2fbb9ea 12007
54b9ddaa
VZ
12008 /* Fall out from the NAPI loop if needed */
12009 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12010 bnx2x_update_fpsb_idx(fp);
12011 /* bnx2x_has_rx_work() reads the status block, thus we need
12012 * to ensure that status block indices have been actually read
12013 * (bnx2x_update_fpsb_idx) prior to this check
12014 * (bnx2x_has_rx_work) so that we won't write the "newer"
12015 * value of the status block to IGU (if there was a DMA right
12016 * after bnx2x_has_rx_work and if there is no rmb, the memory
12017 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12018 * before bnx2x_ack_sb). In this case there will never be
12019 * another interrupt until there is another update of the
12020 * status block, while there is still unhandled work.
12021 */
12022 rmb();
a2fbb9ea 12023
54b9ddaa
VZ
12024 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12025 napi_complete(napi);
12026 /* Re-enable interrupts */
12027 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12028 le16_to_cpu(fp->fp_c_idx),
12029 IGU_INT_NOP, 1);
12030 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12031 le16_to_cpu(fp->fp_u_idx),
12032 IGU_INT_ENABLE, 1);
12033 break;
12034 }
12035 }
a2fbb9ea 12036 }
356e2385 12037
a2fbb9ea
ET
12038 return work_done;
12039}
12040
755735eb
EG
12041
12042/* we split the first BD into headers and data BDs
33471629 12043 * to ease the pain of our fellow microcode engineers
755735eb
EG
12044 * we use one mapping for both BDs
12045 * So far this has only been observed to happen
12046 * in Other Operating Systems(TM)
12047 */
12048static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12049 struct bnx2x_fastpath *fp,
ca00392c
EG
12050 struct sw_tx_bd *tx_buf,
12051 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
12052 u16 bd_prod, int nbd)
12053{
ca00392c 12054 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
12055 struct eth_tx_bd *d_tx_bd;
12056 dma_addr_t mapping;
12057 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12058
12059 /* first fix first BD */
12060 h_tx_bd->nbd = cpu_to_le16(nbd);
12061 h_tx_bd->nbytes = cpu_to_le16(hlen);
12062
12063 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12064 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12065 h_tx_bd->addr_lo, h_tx_bd->nbd);
12066
12067 /* now get a new data BD
12068 * (after the pbd) and fill it */
12069 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 12070 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
12071
12072 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12073 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12074
12075 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12076 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12077 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
12078
12079 /* this marks the BD as one that has no individual mapping */
12080 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12081
755735eb
EG
12082 DP(NETIF_MSG_TX_QUEUED,
12083 "TSO split data size is %d (%x:%x)\n",
12084 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12085
ca00392c
EG
12086 /* update tx_bd */
12087 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
12088
12089 return bd_prod;
12090}
12091
12092static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12093{
12094 if (fix > 0)
12095 csum = (u16) ~csum_fold(csum_sub(csum,
12096 csum_partial(t_header - fix, fix, 0)));
12097
12098 else if (fix < 0)
12099 csum = (u16) ~csum_fold(csum_add(csum,
12100 csum_partial(t_header, -fix, 0)));
12101
12102 return swab16(csum);
12103}
12104
12105static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12106{
12107 u32 rc;
12108
12109 if (skb->ip_summed != CHECKSUM_PARTIAL)
12110 rc = XMIT_PLAIN;
12111
12112 else {
4781bfad 12113 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
12114 rc = XMIT_CSUM_V6;
12115 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12116 rc |= XMIT_CSUM_TCP;
12117
12118 } else {
12119 rc = XMIT_CSUM_V4;
12120 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12121 rc |= XMIT_CSUM_TCP;
12122 }
12123 }
12124
12125 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
d6a2f98b 12126 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
755735eb
EG
12127
12128 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
d6a2f98b 12129 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
755735eb
EG
12130
12131 return rc;
12132}
12133
632da4d6 12134#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
12135/* check if packet requires linearization (packet is too fragmented)
12136 no need to check fragmentation if page size > 8K (there will be no
12137 violation to FW restrictions) */
755735eb
EG
12138static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12139 u32 xmit_type)
12140{
12141 int to_copy = 0;
12142 int hlen = 0;
12143 int first_bd_sz = 0;
12144
12145 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12146 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12147
12148 if (xmit_type & XMIT_GSO) {
12149 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12150 /* Check if LSO packet needs to be copied:
12151 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12152 int wnd_size = MAX_FETCH_BD - 3;
33471629 12153 /* Number of windows to check */
755735eb
EG
12154 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12155 int wnd_idx = 0;
12156 int frag_idx = 0;
12157 u32 wnd_sum = 0;
12158
12159 /* Headers length */
12160 hlen = (int)(skb_transport_header(skb) - skb->data) +
12161 tcp_hdrlen(skb);
12162
12163 /* Amount of data (w/o headers) on linear part of SKB*/
12164 first_bd_sz = skb_headlen(skb) - hlen;
12165
12166 wnd_sum = first_bd_sz;
12167
12168 /* Calculate the first sum - it's special */
12169 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12170 wnd_sum +=
12171 skb_shinfo(skb)->frags[frag_idx].size;
12172
12173 /* If there was data on linear skb data - check it */
12174 if (first_bd_sz > 0) {
12175 if (unlikely(wnd_sum < lso_mss)) {
12176 to_copy = 1;
12177 goto exit_lbl;
12178 }
12179
12180 wnd_sum -= first_bd_sz;
12181 }
12182
12183 /* Others are easier: run through the frag list and
12184 check all windows */
12185 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12186 wnd_sum +=
12187 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12188
12189 if (unlikely(wnd_sum < lso_mss)) {
12190 to_copy = 1;
12191 break;
12192 }
12193 wnd_sum -=
12194 skb_shinfo(skb)->frags[wnd_idx].size;
12195 }
755735eb
EG
12196 } else {
12197 /* in non-LSO too fragmented packet should always
12198 be linearized */
12199 to_copy = 1;
12200 }
12201 }
12202
12203exit_lbl:
12204 if (unlikely(to_copy))
12205 DP(NETIF_MSG_TX_QUEUED,
12206 "Linearization IS REQUIRED for %s packet. "
12207 "num_frags %d hlen %d first_bd_sz %d\n",
12208 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12209 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12210
12211 return to_copy;
12212}
632da4d6 12213#endif
755735eb
EG
12214
12215/* called with netif_tx_lock
a2fbb9ea 12216 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 12217 * netif_wake_queue()
a2fbb9ea 12218 */
61357325 12219static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
12220{
12221 struct bnx2x *bp = netdev_priv(dev);
54b9ddaa 12222 struct bnx2x_fastpath *fp;
555f6c78 12223 struct netdev_queue *txq;
a2fbb9ea 12224 struct sw_tx_bd *tx_buf;
ca00392c
EG
12225 struct eth_tx_start_bd *tx_start_bd;
12226 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
12227 struct eth_tx_parse_bd *pbd = NULL;
12228 u16 pkt_prod, bd_prod;
755735eb 12229 int nbd, fp_index;
a2fbb9ea 12230 dma_addr_t mapping;
755735eb 12231 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
12232 int i;
12233 u8 hlen = 0;
ca00392c 12234 __le16 pkt_size = 0;
dea7aab1
VZ
12235 struct ethhdr *eth;
12236 u8 mac_type = UNICAST_ADDRESS;
a2fbb9ea
ET
12237
12238#ifdef BNX2X_STOP_ON_ERROR
12239 if (unlikely(bp->panic))
12240 return NETDEV_TX_BUSY;
12241#endif
12242
555f6c78
EG
12243 fp_index = skb_get_queue_mapping(skb);
12244 txq = netdev_get_tx_queue(dev, fp_index);
12245
54b9ddaa 12246 fp = &bp->fp[fp_index];
755735eb 12247
231fd58a 12248 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
54b9ddaa 12249 fp->eth_q_stats.driver_xoff++;
555f6c78 12250 netif_tx_stop_queue(txq);
a2fbb9ea
ET
12251 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12252 return NETDEV_TX_BUSY;
12253 }
12254
755735eb
EG
12255 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12256 " gso type %x xmit_type %x\n",
12257 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12258 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12259
dea7aab1
VZ
12260 eth = (struct ethhdr *)skb->data;
12261
12262 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12263 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12264 if (is_broadcast_ether_addr(eth->h_dest))
12265 mac_type = BROADCAST_ADDRESS;
12266 else
12267 mac_type = MULTICAST_ADDRESS;
12268 }
12269
632da4d6 12270#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
12271 /* First, check if we need to linearize the skb (due to FW
12272 restrictions). No need to check fragmentation if page size > 8K
12273 (there will be no violation to FW restrictions) */
755735eb
EG
12274 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12275 /* Statistics of linearization */
12276 bp->lin_cnt++;
12277 if (skb_linearize(skb) != 0) {
12278 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12279 "silently dropping this SKB\n");
12280 dev_kfree_skb_any(skb);
da5a662a 12281 return NETDEV_TX_OK;
755735eb
EG
12282 }
12283 }
632da4d6 12284#endif
755735eb 12285
a2fbb9ea 12286 /*
755735eb 12287 Please read carefully. First we use one BD which we mark as start,
ca00392c 12288 then we have a parsing info BD (used for TSO or xsum),
755735eb 12289 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
12290 (don't forget to mark the last one as last,
12291 and to unmap only AFTER you write to the BD ...)
755735eb 12292 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
12293 */
12294
12295 pkt_prod = fp->tx_pkt_prod++;
755735eb 12296 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 12297
755735eb 12298 /* get a tx_buf and first BD */
a2fbb9ea 12299 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 12300 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 12301
ca00392c 12302 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
dea7aab1
VZ
12303 tx_start_bd->general_data = (mac_type <<
12304 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 12305 /* header nbd */
ca00392c 12306 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 12307
755735eb
EG
12308 /* remember the first BD of the packet */
12309 tx_buf->first_bd = fp->tx_bd_prod;
12310 tx_buf->skb = skb;
ca00392c 12311 tx_buf->flags = 0;
a2fbb9ea
ET
12312
12313 DP(NETIF_MSG_TX_QUEUED,
12314 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 12315 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 12316
0c6671b0
EG
12317#ifdef BCM_VLAN
12318 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12319 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
12320 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12321 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 12322 } else
0c6671b0 12323#endif
ca00392c 12324 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 12325
ca00392c
EG
12326 /* turn on parsing and get a BD */
12327 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12328 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 12329
ca00392c 12330 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
12331
12332 if (xmit_type & XMIT_CSUM) {
ca00392c 12333 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
12334
12335 /* for now NS flag is not used in Linux */
4781bfad
EG
12336 pbd->global_data =
12337 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12338 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 12339
755735eb
EG
12340 pbd->ip_hlen = (skb_transport_header(skb) -
12341 skb_network_header(skb)) / 2;
12342
12343 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 12344
755735eb 12345 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 12346 hlen = hlen*2;
a2fbb9ea 12347
ca00392c 12348 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
12349
12350 if (xmit_type & XMIT_CSUM_V4)
ca00392c 12351 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
12352 ETH_TX_BD_FLAGS_IP_CSUM;
12353 else
ca00392c
EG
12354 tx_start_bd->bd_flags.as_bitfield |=
12355 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
12356
12357 if (xmit_type & XMIT_CSUM_TCP) {
12358 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12359
12360 } else {
12361 s8 fix = SKB_CS_OFF(skb); /* signed! */
12362
ca00392c 12363 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 12364
755735eb 12365 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
12366 "hlen %d fix %d csum before fix %x\n",
12367 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
12368
12369 /* HW bug: fixup the CSUM */
12370 pbd->tcp_pseudo_csum =
12371 bnx2x_csum_fix(skb_transport_header(skb),
12372 SKB_CS(skb), fix);
12373
12374 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12375 pbd->tcp_pseudo_csum);
12376 }
a2fbb9ea
ET
12377 }
12378
1a983142
FT
12379 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12380 skb_headlen(skb), DMA_TO_DEVICE);
a2fbb9ea 12381
ca00392c
EG
12382 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12383 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12384 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12385 tx_start_bd->nbd = cpu_to_le16(nbd);
12386 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12387 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
12388
12389 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 12390 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
12391 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12392 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12393 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 12394
755735eb 12395 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
12396
12397 DP(NETIF_MSG_TX_QUEUED,
12398 "TSO packet len %d hlen %d total len %d tso size %d\n",
12399 skb->len, hlen, skb_headlen(skb),
12400 skb_shinfo(skb)->gso_size);
12401
ca00392c 12402 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 12403
755735eb 12404 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
12405 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12406 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
12407
12408 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12409 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
12410 pbd->tcp_flags = pbd_tcp_flags(skb);
12411
12412 if (xmit_type & XMIT_GSO_V4) {
12413 pbd->ip_id = swab16(ip_hdr(skb)->id);
12414 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
12415 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12416 ip_hdr(skb)->daddr,
12417 0, IPPROTO_TCP, 0));
755735eb
EG
12418
12419 } else
12420 pbd->tcp_pseudo_csum =
12421 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12422 &ipv6_hdr(skb)->daddr,
12423 0, IPPROTO_TCP, 0));
12424
a2fbb9ea
ET
12425 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12426 }
ca00392c 12427 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 12428
755735eb
EG
12429 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12430 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 12431
755735eb 12432 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
12433 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12434 if (total_pkt_bd == NULL)
12435 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 12436
1a983142
FT
12437 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12438 frag->page_offset,
12439 frag->size, DMA_TO_DEVICE);
a2fbb9ea 12440
ca00392c
EG
12441 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12442 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12443 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12444 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 12445
755735eb 12446 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
12447 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12448 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12449 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
12450 }
12451
ca00392c 12452 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 12453
a2fbb9ea
ET
12454 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12455
755735eb 12456 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
12457 * if the packet contains or ends with it
12458 */
12459 if (TX_BD_POFF(bd_prod) < nbd)
12460 nbd++;
12461
ca00392c
EG
12462 if (total_pkt_bd != NULL)
12463 total_pkt_bd->total_pkt_bytes = pkt_size;
12464
a2fbb9ea
ET
12465 if (pbd)
12466 DP(NETIF_MSG_TX_QUEUED,
12467 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12468 " tcp_flags %x xsum %x seq %u hlen %u\n",
12469 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12470 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 12471 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 12472
755735eb 12473 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 12474
58f4c4cf
EG
12475 /*
12476 * Make sure that the BD data is updated before updating the producer
12477 * since FW might read the BD right after the producer is updated.
12478 * This is only applicable for weak-ordered memory model archs such
12479 * as IA-64. The following barrier is also mandatory since FW will
12480 * assumes packets must have BDs.
12481 */
12482 wmb();
12483
ca00392c
EG
12484 fp->tx_db.data.prod += nbd;
12485 barrier();
54b9ddaa 12486 DOORBELL(bp, fp->index, fp->tx_db.raw);
a2fbb9ea
ET
12487
12488 mmiowb();
12489
755735eb 12490 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
12491
12492 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 12493 netif_tx_stop_queue(txq);
9baddeb8
SG
12494
12495 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12496 * ordering of set_bit() in netif_tx_stop_queue() and read of
12497 * fp->bd_tx_cons */
58f4c4cf 12498 smp_mb();
9baddeb8 12499
54b9ddaa 12500 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 12501 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 12502 netif_tx_wake_queue(txq);
a2fbb9ea 12503 }
54b9ddaa 12504 fp->tx_pkt++;
a2fbb9ea
ET
12505
12506 return NETDEV_TX_OK;
12507}
12508
bb2a0f7a 12509/* called with rtnl_lock */
a2fbb9ea
ET
12510static int bnx2x_open(struct net_device *dev)
12511{
12512 struct bnx2x *bp = netdev_priv(dev);
12513
6eccabb3
EG
12514 netif_carrier_off(dev);
12515
a2fbb9ea
ET
12516 bnx2x_set_power_state(bp, PCI_D0);
12517
72fd0718
VZ
12518 if (!bnx2x_reset_is_done(bp)) {
12519 do {
12520 /* Reset MCP mail box sequence if there is on going
12521 * recovery
12522 */
12523 bp->fw_seq = 0;
12524
12525 /* If it's the first function to load and reset done
12526 * is still not cleared it may mean that. We don't
12527 * check the attention state here because it may have
12528 * already been cleared by a "common" reset but we
12529 * shell proceed with "process kill" anyway.
12530 */
12531 if ((bnx2x_get_load_cnt(bp) == 0) &&
12532 bnx2x_trylock_hw_lock(bp,
12533 HW_LOCK_RESOURCE_RESERVED_08) &&
12534 (!bnx2x_leader_reset(bp))) {
12535 DP(NETIF_MSG_HW, "Recovered in open\n");
12536 break;
12537 }
12538
12539 bnx2x_set_power_state(bp, PCI_D3hot);
12540
12541 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12542 " completed yet. Try again later. If u still see this"
12543 " message after a few retries then power cycle is"
12544 " required.\n", bp->dev->name);
12545
12546 return -EAGAIN;
12547 } while (0);
12548 }
12549
12550 bp->recovery_state = BNX2X_RECOVERY_DONE;
12551
bb2a0f7a 12552 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
12553}
12554
bb2a0f7a 12555/* called with rtnl_lock */
a2fbb9ea
ET
12556static int bnx2x_close(struct net_device *dev)
12557{
a2fbb9ea
ET
12558 struct bnx2x *bp = netdev_priv(dev);
12559
12560 /* Unload the driver, release IRQs */
bb2a0f7a 12561 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 12562 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
12563
12564 return 0;
12565}
12566
f5372251 12567/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
12568static void bnx2x_set_rx_mode(struct net_device *dev)
12569{
12570 struct bnx2x *bp = netdev_priv(dev);
12571 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12572 int port = BP_PORT(bp);
12573
12574 if (bp->state != BNX2X_STATE_OPEN) {
12575 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12576 return;
12577 }
12578
12579 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12580
12581 if (dev->flags & IFF_PROMISC)
12582 rx_mode = BNX2X_RX_MODE_PROMISC;
12583
12584 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
12585 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12586 CHIP_IS_E1(bp)))
34f80b04
EG
12587 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12588
12589 else { /* some multicasts */
12590 if (CHIP_IS_E1(bp)) {
12591 int i, old, offset;
22bedad3 12592 struct netdev_hw_addr *ha;
34f80b04
EG
12593 struct mac_configuration_cmd *config =
12594 bnx2x_sp(bp, mcast_config);
12595
0ddf477b 12596 i = 0;
22bedad3 12597 netdev_for_each_mc_addr(ha, dev) {
34f80b04
EG
12598 config->config_table[i].
12599 cam_entry.msb_mac_addr =
22bedad3 12600 swab16(*(u16 *)&ha->addr[0]);
34f80b04
EG
12601 config->config_table[i].
12602 cam_entry.middle_mac_addr =
22bedad3 12603 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
12604 config->config_table[i].
12605 cam_entry.lsb_mac_addr =
22bedad3 12606 swab16(*(u16 *)&ha->addr[4]);
34f80b04
EG
12607 config->config_table[i].cam_entry.flags =
12608 cpu_to_le16(port);
12609 config->config_table[i].
12610 target_table_entry.flags = 0;
ca00392c
EG
12611 config->config_table[i].target_table_entry.
12612 clients_bit_vector =
12613 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
12614 config->config_table[i].
12615 target_table_entry.vlan_id = 0;
12616
12617 DP(NETIF_MSG_IFUP,
12618 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12619 config->config_table[i].
12620 cam_entry.msb_mac_addr,
12621 config->config_table[i].
12622 cam_entry.middle_mac_addr,
12623 config->config_table[i].
12624 cam_entry.lsb_mac_addr);
0ddf477b 12625 i++;
34f80b04 12626 }
8d9c5f34 12627 old = config->hdr.length;
34f80b04
EG
12628 if (old > i) {
12629 for (; i < old; i++) {
12630 if (CAM_IS_INVALID(config->
12631 config_table[i])) {
af246401 12632 /* already invalidated */
34f80b04
EG
12633 break;
12634 }
12635 /* invalidate */
12636 CAM_INVALIDATE(config->
12637 config_table[i]);
12638 }
12639 }
12640
12641 if (CHIP_REV_IS_SLOW(bp))
12642 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12643 else
12644 offset = BNX2X_MAX_MULTICAST*(1 + port);
12645
8d9c5f34 12646 config->hdr.length = i;
34f80b04 12647 config->hdr.offset = offset;
8d9c5f34 12648 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
12649 config->hdr.reserved1 = 0;
12650
e665bfda
MC
12651 bp->set_mac_pending++;
12652 smp_wmb();
12653
34f80b04
EG
12654 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12655 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12656 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12657 0);
12658 } else { /* E1H */
12659 /* Accept one or more multicasts */
22bedad3 12660 struct netdev_hw_addr *ha;
34f80b04
EG
12661 u32 mc_filter[MC_HASH_SIZE];
12662 u32 crc, bit, regidx;
12663 int i;
12664
12665 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12666
22bedad3 12667 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 12668 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 12669 ha->addr);
34f80b04 12670
22bedad3 12671 crc = crc32c_le(0, ha->addr, ETH_ALEN);
34f80b04
EG
12672 bit = (crc >> 24) & 0xff;
12673 regidx = bit >> 5;
12674 bit &= 0x1f;
12675 mc_filter[regidx] |= (1 << bit);
12676 }
12677
12678 for (i = 0; i < MC_HASH_SIZE; i++)
12679 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12680 mc_filter[i]);
12681 }
12682 }
12683
12684 bp->rx_mode = rx_mode;
12685 bnx2x_set_storm_rx_mode(bp);
12686}
12687
12688/* called with rtnl_lock */
a2fbb9ea
ET
12689static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12690{
12691 struct sockaddr *addr = p;
12692 struct bnx2x *bp = netdev_priv(dev);
12693
34f80b04 12694 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
12695 return -EINVAL;
12696
12697 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
12698 if (netif_running(dev)) {
12699 if (CHIP_IS_E1(bp))
e665bfda 12700 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 12701 else
e665bfda 12702 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 12703 }
a2fbb9ea
ET
12704
12705 return 0;
12706}
12707
c18487ee 12708/* called with rtnl_lock */
01cd4528
EG
12709static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12710 int devad, u16 addr)
a2fbb9ea 12711{
01cd4528
EG
12712 struct bnx2x *bp = netdev_priv(netdev);
12713 u16 value;
12714 int rc;
12715 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 12716
01cd4528
EG
12717 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12718 prtad, devad, addr);
a2fbb9ea 12719
01cd4528
EG
12720 if (prtad != bp->mdio.prtad) {
12721 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12722 prtad, bp->mdio.prtad);
12723 return -EINVAL;
12724 }
12725
12726 /* The HW expects different devad if CL22 is used */
12727 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 12728
01cd4528
EG
12729 bnx2x_acquire_phy_lock(bp);
12730 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12731 devad, addr, &value);
12732 bnx2x_release_phy_lock(bp);
12733 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 12734
01cd4528
EG
12735 if (!rc)
12736 rc = value;
12737 return rc;
12738}
a2fbb9ea 12739
01cd4528
EG
12740/* called with rtnl_lock */
12741static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12742 u16 addr, u16 value)
12743{
12744 struct bnx2x *bp = netdev_priv(netdev);
12745 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12746 int rc;
12747
12748 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12749 " value 0x%x\n", prtad, devad, addr, value);
12750
12751 if (prtad != bp->mdio.prtad) {
12752 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12753 prtad, bp->mdio.prtad);
12754 return -EINVAL;
a2fbb9ea
ET
12755 }
12756
01cd4528
EG
12757 /* The HW expects different devad if CL22 is used */
12758 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 12759
01cd4528
EG
12760 bnx2x_acquire_phy_lock(bp);
12761 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12762 devad, addr, value);
12763 bnx2x_release_phy_lock(bp);
12764 return rc;
12765}
c18487ee 12766
01cd4528
EG
12767/* called with rtnl_lock */
12768static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12769{
12770 struct bnx2x *bp = netdev_priv(dev);
12771 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 12772
01cd4528
EG
12773 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12774 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 12775
01cd4528
EG
12776 if (!netif_running(dev))
12777 return -EAGAIN;
12778
12779 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
12780}
12781
34f80b04 12782/* called with rtnl_lock */
a2fbb9ea
ET
12783static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12784{
12785 struct bnx2x *bp = netdev_priv(dev);
34f80b04 12786 int rc = 0;
a2fbb9ea 12787
72fd0718
VZ
12788 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12789 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12790 return -EAGAIN;
12791 }
12792
a2fbb9ea
ET
12793 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12794 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12795 return -EINVAL;
12796
12797 /* This does not race with packet allocation
c14423fe 12798 * because the actual alloc size is
a2fbb9ea
ET
12799 * only updated as part of load
12800 */
12801 dev->mtu = new_mtu;
12802
12803 if (netif_running(dev)) {
34f80b04
EG
12804 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12805 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 12806 }
34f80b04
EG
12807
12808 return rc;
a2fbb9ea
ET
12809}
12810
12811static void bnx2x_tx_timeout(struct net_device *dev)
12812{
12813 struct bnx2x *bp = netdev_priv(dev);
12814
12815#ifdef BNX2X_STOP_ON_ERROR
12816 if (!bp->panic)
12817 bnx2x_panic();
12818#endif
12819 /* This allows the netif to be shutdown gracefully before resetting */
72fd0718 12820 schedule_delayed_work(&bp->reset_task, 0);
a2fbb9ea
ET
12821}
12822
12823#ifdef BCM_VLAN
34f80b04 12824/* called with rtnl_lock */
a2fbb9ea
ET
12825static void bnx2x_vlan_rx_register(struct net_device *dev,
12826 struct vlan_group *vlgrp)
12827{
12828 struct bnx2x *bp = netdev_priv(dev);
12829
12830 bp->vlgrp = vlgrp;
0c6671b0
EG
12831
12832 /* Set flags according to the required capabilities */
12833 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12834
12835 if (dev->features & NETIF_F_HW_VLAN_TX)
12836 bp->flags |= HW_VLAN_TX_FLAG;
12837
12838 if (dev->features & NETIF_F_HW_VLAN_RX)
12839 bp->flags |= HW_VLAN_RX_FLAG;
12840
a2fbb9ea 12841 if (netif_running(dev))
49d66772 12842 bnx2x_set_client_config(bp);
a2fbb9ea 12843}
34f80b04 12844
a2fbb9ea
ET
12845#endif
12846
257ddbda 12847#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
12848static void poll_bnx2x(struct net_device *dev)
12849{
12850 struct bnx2x *bp = netdev_priv(dev);
12851
12852 disable_irq(bp->pdev->irq);
12853 bnx2x_interrupt(bp->pdev->irq, dev);
12854 enable_irq(bp->pdev->irq);
12855}
12856#endif
12857
c64213cd
SH
12858static const struct net_device_ops bnx2x_netdev_ops = {
12859 .ndo_open = bnx2x_open,
12860 .ndo_stop = bnx2x_close,
12861 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 12862 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
12863 .ndo_set_mac_address = bnx2x_change_mac_addr,
12864 .ndo_validate_addr = eth_validate_addr,
12865 .ndo_do_ioctl = bnx2x_ioctl,
12866 .ndo_change_mtu = bnx2x_change_mtu,
12867 .ndo_tx_timeout = bnx2x_tx_timeout,
12868#ifdef BCM_VLAN
12869 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12870#endif
257ddbda 12871#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
12872 .ndo_poll_controller = poll_bnx2x,
12873#endif
12874};
12875
34f80b04
EG
12876static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12877 struct net_device *dev)
a2fbb9ea
ET
12878{
12879 struct bnx2x *bp;
12880 int rc;
12881
12882 SET_NETDEV_DEV(dev, &pdev->dev);
12883 bp = netdev_priv(dev);
12884
34f80b04
EG
12885 bp->dev = dev;
12886 bp->pdev = pdev;
a2fbb9ea 12887 bp->flags = 0;
34f80b04 12888 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
12889
12890 rc = pci_enable_device(pdev);
12891 if (rc) {
cdaa7cb8
VZ
12892 dev_err(&bp->pdev->dev,
12893 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
12894 goto err_out;
12895 }
12896
12897 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
12898 dev_err(&bp->pdev->dev,
12899 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
12900 rc = -ENODEV;
12901 goto err_out_disable;
12902 }
12903
12904 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
12905 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12906 " base address, aborting\n");
a2fbb9ea
ET
12907 rc = -ENODEV;
12908 goto err_out_disable;
12909 }
12910
34f80b04
EG
12911 if (atomic_read(&pdev->enable_cnt) == 1) {
12912 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12913 if (rc) {
cdaa7cb8
VZ
12914 dev_err(&bp->pdev->dev,
12915 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
12916 goto err_out_disable;
12917 }
a2fbb9ea 12918
34f80b04
EG
12919 pci_set_master(pdev);
12920 pci_save_state(pdev);
12921 }
a2fbb9ea
ET
12922
12923 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12924 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
12925 dev_err(&bp->pdev->dev,
12926 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
12927 rc = -EIO;
12928 goto err_out_release;
12929 }
12930
12931 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12932 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
12933 dev_err(&bp->pdev->dev,
12934 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
12935 rc = -EIO;
12936 goto err_out_release;
12937 }
12938
1a983142 12939 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 12940 bp->flags |= USING_DAC_FLAG;
1a983142 12941 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
12942 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12943 " failed, aborting\n");
a2fbb9ea
ET
12944 rc = -EIO;
12945 goto err_out_release;
12946 }
12947
1a983142 12948 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
12949 dev_err(&bp->pdev->dev,
12950 "System does not support DMA, aborting\n");
a2fbb9ea
ET
12951 rc = -EIO;
12952 goto err_out_release;
12953 }
12954
34f80b04
EG
12955 dev->mem_start = pci_resource_start(pdev, 0);
12956 dev->base_addr = dev->mem_start;
12957 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
12958
12959 dev->irq = pdev->irq;
12960
275f165f 12961 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 12962 if (!bp->regview) {
cdaa7cb8
VZ
12963 dev_err(&bp->pdev->dev,
12964 "Cannot map register space, aborting\n");
a2fbb9ea
ET
12965 rc = -ENOMEM;
12966 goto err_out_release;
12967 }
12968
34f80b04
EG
12969 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12970 min_t(u64, BNX2X_DB_SIZE,
12971 pci_resource_len(pdev, 2)));
a2fbb9ea 12972 if (!bp->doorbells) {
cdaa7cb8
VZ
12973 dev_err(&bp->pdev->dev,
12974 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
12975 rc = -ENOMEM;
12976 goto err_out_unmap;
12977 }
12978
12979 bnx2x_set_power_state(bp, PCI_D0);
12980
34f80b04
EG
12981 /* clean indirect addresses */
12982 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12983 PCICFG_VENDOR_ID_OFFSET);
12984 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12985 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12986 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12987 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 12988
72fd0718
VZ
12989 /* Reset the load counter */
12990 bnx2x_clear_load_cnt(bp);
12991
34f80b04 12992 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 12993
c64213cd 12994 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 12995 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
12996 dev->features |= NETIF_F_SG;
12997 dev->features |= NETIF_F_HW_CSUM;
12998 if (bp->flags & USING_DAC_FLAG)
12999 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
13000 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13001 dev->features |= NETIF_F_TSO6;
34f80b04
EG
13002#ifdef BCM_VLAN
13003 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 13004 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
13005
13006 dev->vlan_features |= NETIF_F_SG;
13007 dev->vlan_features |= NETIF_F_HW_CSUM;
13008 if (bp->flags & USING_DAC_FLAG)
13009 dev->vlan_features |= NETIF_F_HIGHDMA;
13010 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13011 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 13012#endif
a2fbb9ea 13013
01cd4528
EG
13014 /* get_port_hwinfo() will set prtad and mmds properly */
13015 bp->mdio.prtad = MDIO_PRTAD_NONE;
13016 bp->mdio.mmds = 0;
13017 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13018 bp->mdio.dev = dev;
13019 bp->mdio.mdio_read = bnx2x_mdio_read;
13020 bp->mdio.mdio_write = bnx2x_mdio_write;
13021
a2fbb9ea
ET
13022 return 0;
13023
13024err_out_unmap:
13025 if (bp->regview) {
13026 iounmap(bp->regview);
13027 bp->regview = NULL;
13028 }
a2fbb9ea
ET
13029 if (bp->doorbells) {
13030 iounmap(bp->doorbells);
13031 bp->doorbells = NULL;
13032 }
13033
13034err_out_release:
34f80b04
EG
13035 if (atomic_read(&pdev->enable_cnt) == 1)
13036 pci_release_regions(pdev);
a2fbb9ea
ET
13037
13038err_out_disable:
13039 pci_disable_device(pdev);
13040 pci_set_drvdata(pdev, NULL);
13041
13042err_out:
13043 return rc;
13044}
13045
37f9ce62
EG
13046static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13047 int *width, int *speed)
25047950
ET
13048{
13049 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13050
37f9ce62 13051 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 13052
37f9ce62
EG
13053 /* return value of 1=2.5GHz 2=5GHz */
13054 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 13055}
37f9ce62 13056
94a78b79
VZ
13057static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13058{
37f9ce62 13059 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
13060 struct bnx2x_fw_file_hdr *fw_hdr;
13061 struct bnx2x_fw_file_section *sections;
94a78b79 13062 u32 offset, len, num_ops;
37f9ce62 13063 u16 *ops_offsets;
94a78b79 13064 int i;
37f9ce62 13065 const u8 *fw_ver;
94a78b79
VZ
13066
13067 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13068 return -EINVAL;
13069
13070 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13071 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13072
13073 /* Make sure none of the offsets and sizes make us read beyond
13074 * the end of the firmware data */
13075 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13076 offset = be32_to_cpu(sections[i].offset);
13077 len = be32_to_cpu(sections[i].len);
13078 if (offset + len > firmware->size) {
cdaa7cb8
VZ
13079 dev_err(&bp->pdev->dev,
13080 "Section %d length is out of bounds\n", i);
94a78b79
VZ
13081 return -EINVAL;
13082 }
13083 }
13084
13085 /* Likewise for the init_ops offsets */
13086 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13087 ops_offsets = (u16 *)(firmware->data + offset);
13088 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13089
13090 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13091 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
13092 dev_err(&bp->pdev->dev,
13093 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
13094 return -EINVAL;
13095 }
13096 }
13097
13098 /* Check FW version */
13099 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13100 fw_ver = firmware->data + offset;
13101 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13102 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13103 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13104 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
13105 dev_err(&bp->pdev->dev,
13106 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
13107 fw_ver[0], fw_ver[1], fw_ver[2],
13108 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13109 BCM_5710_FW_MINOR_VERSION,
13110 BCM_5710_FW_REVISION_VERSION,
13111 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 13112 return -EINVAL;
94a78b79
VZ
13113 }
13114
13115 return 0;
13116}
13117
ab6ad5a4 13118static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 13119{
ab6ad5a4
EG
13120 const __be32 *source = (const __be32 *)_source;
13121 u32 *target = (u32 *)_target;
94a78b79 13122 u32 i;
94a78b79
VZ
13123
13124 for (i = 0; i < n/4; i++)
13125 target[i] = be32_to_cpu(source[i]);
13126}
13127
13128/*
13129 Ops array is stored in the following format:
13130 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13131 */
ab6ad5a4 13132static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 13133{
ab6ad5a4
EG
13134 const __be32 *source = (const __be32 *)_source;
13135 struct raw_op *target = (struct raw_op *)_target;
94a78b79 13136 u32 i, j, tmp;
94a78b79 13137
ab6ad5a4 13138 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
13139 tmp = be32_to_cpu(source[j]);
13140 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
13141 target[i].offset = tmp & 0xffffff;
13142 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
13143 }
13144}
ab6ad5a4
EG
13145
13146static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 13147{
ab6ad5a4
EG
13148 const __be16 *source = (const __be16 *)_source;
13149 u16 *target = (u16 *)_target;
94a78b79 13150 u32 i;
94a78b79
VZ
13151
13152 for (i = 0; i < n/2; i++)
13153 target[i] = be16_to_cpu(source[i]);
13154}
13155
7995c64e
JP
13156#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13157do { \
13158 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13159 bp->arr = kmalloc(len, GFP_KERNEL); \
13160 if (!bp->arr) { \
13161 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13162 goto lbl; \
13163 } \
13164 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13165 (u8 *)bp->arr, len); \
13166} while (0)
94a78b79 13167
94a78b79
VZ
13168static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13169{
45229b42 13170 const char *fw_file_name;
94a78b79 13171 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 13172 int rc;
94a78b79 13173
94a78b79 13174 if (CHIP_IS_E1(bp))
45229b42 13175 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 13176 else if (CHIP_IS_E1H(bp))
45229b42 13177 fw_file_name = FW_FILE_NAME_E1H;
cdaa7cb8
VZ
13178 else {
13179 dev_err(dev, "Unsupported chip revision\n");
13180 return -EINVAL;
13181 }
94a78b79 13182
cdaa7cb8 13183 dev_info(dev, "Loading %s\n", fw_file_name);
94a78b79
VZ
13184
13185 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13186 if (rc) {
cdaa7cb8 13187 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
13188 goto request_firmware_exit;
13189 }
13190
13191 rc = bnx2x_check_firmware(bp);
13192 if (rc) {
cdaa7cb8 13193 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
13194 goto request_firmware_exit;
13195 }
13196
13197 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13198
13199 /* Initialize the pointers to the init arrays */
13200 /* Blob */
13201 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13202
13203 /* Opcodes */
13204 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13205
13206 /* Offsets */
ab6ad5a4
EG
13207 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13208 be16_to_cpu_n);
94a78b79
VZ
13209
13210 /* STORMs firmware */
573f2035
EG
13211 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13212 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13213 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13214 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13215 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13216 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13217 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13218 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13219 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13220 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13221 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13222 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13223 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13224 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13225 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13226 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
13227
13228 return 0;
ab6ad5a4 13229
94a78b79
VZ
13230init_offsets_alloc_err:
13231 kfree(bp->init_ops);
13232init_ops_alloc_err:
13233 kfree(bp->init_data);
13234request_firmware_exit:
13235 release_firmware(bp->firmware);
13236
13237 return rc;
13238}
13239
13240
a2fbb9ea
ET
13241static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13242 const struct pci_device_id *ent)
13243{
a2fbb9ea
ET
13244 struct net_device *dev = NULL;
13245 struct bnx2x *bp;
37f9ce62 13246 int pcie_width, pcie_speed;
25047950 13247 int rc;
a2fbb9ea 13248
a2fbb9ea 13249 /* dev zeroed in init_etherdev */
555f6c78 13250 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04 13251 if (!dev) {
cdaa7cb8 13252 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 13253 return -ENOMEM;
34f80b04 13254 }
a2fbb9ea 13255
a2fbb9ea 13256 bp = netdev_priv(dev);
7995c64e 13257 bp->msg_enable = debug;
a2fbb9ea 13258
df4770de
EG
13259 pci_set_drvdata(pdev, dev);
13260
34f80b04 13261 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
13262 if (rc < 0) {
13263 free_netdev(dev);
13264 return rc;
13265 }
13266
34f80b04 13267 rc = bnx2x_init_bp(bp);
693fc0d1
EG
13268 if (rc)
13269 goto init_one_exit;
13270
94a78b79
VZ
13271 /* Set init arrays */
13272 rc = bnx2x_init_firmware(bp, &pdev->dev);
13273 if (rc) {
cdaa7cb8 13274 dev_err(&pdev->dev, "Error loading firmware\n");
94a78b79
VZ
13275 goto init_one_exit;
13276 }
13277
693fc0d1 13278 rc = register_netdev(dev);
34f80b04 13279 if (rc) {
693fc0d1 13280 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
13281 goto init_one_exit;
13282 }
13283
37f9ce62 13284 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
cdaa7cb8
VZ
13285 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13286 " IRQ %d, ", board_info[ent->driver_data].name,
13287 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13288 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13289 dev->base_addr, bp->pdev->irq);
13290 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 13291
a2fbb9ea 13292 return 0;
34f80b04
EG
13293
13294init_one_exit:
13295 if (bp->regview)
13296 iounmap(bp->regview);
13297
13298 if (bp->doorbells)
13299 iounmap(bp->doorbells);
13300
13301 free_netdev(dev);
13302
13303 if (atomic_read(&pdev->enable_cnt) == 1)
13304 pci_release_regions(pdev);
13305
13306 pci_disable_device(pdev);
13307 pci_set_drvdata(pdev, NULL);
13308
13309 return rc;
a2fbb9ea
ET
13310}
13311
13312static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13313{
13314 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
13315 struct bnx2x *bp;
13316
13317 if (!dev) {
cdaa7cb8 13318 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
13319 return;
13320 }
228241eb 13321 bp = netdev_priv(dev);
a2fbb9ea 13322
a2fbb9ea
ET
13323 unregister_netdev(dev);
13324
72fd0718
VZ
13325 /* Make sure RESET task is not scheduled before continuing */
13326 cancel_delayed_work_sync(&bp->reset_task);
13327
94a78b79
VZ
13328 kfree(bp->init_ops_offsets);
13329 kfree(bp->init_ops);
13330 kfree(bp->init_data);
13331 release_firmware(bp->firmware);
13332
a2fbb9ea
ET
13333 if (bp->regview)
13334 iounmap(bp->regview);
13335
13336 if (bp->doorbells)
13337 iounmap(bp->doorbells);
13338
13339 free_netdev(dev);
34f80b04
EG
13340
13341 if (atomic_read(&pdev->enable_cnt) == 1)
13342 pci_release_regions(pdev);
13343
a2fbb9ea
ET
13344 pci_disable_device(pdev);
13345 pci_set_drvdata(pdev, NULL);
13346}
13347
13348static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13349{
13350 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
13351 struct bnx2x *bp;
13352
34f80b04 13353 if (!dev) {
cdaa7cb8 13354 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
34f80b04
EG
13355 return -ENODEV;
13356 }
13357 bp = netdev_priv(dev);
a2fbb9ea 13358
34f80b04 13359 rtnl_lock();
a2fbb9ea 13360
34f80b04 13361 pci_save_state(pdev);
228241eb 13362
34f80b04
EG
13363 if (!netif_running(dev)) {
13364 rtnl_unlock();
13365 return 0;
13366 }
a2fbb9ea
ET
13367
13368 netif_device_detach(dev);
a2fbb9ea 13369
da5a662a 13370 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 13371
a2fbb9ea 13372 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 13373
34f80b04
EG
13374 rtnl_unlock();
13375
a2fbb9ea
ET
13376 return 0;
13377}
13378
13379static int bnx2x_resume(struct pci_dev *pdev)
13380{
13381 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 13382 struct bnx2x *bp;
a2fbb9ea
ET
13383 int rc;
13384
228241eb 13385 if (!dev) {
cdaa7cb8 13386 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
13387 return -ENODEV;
13388 }
228241eb 13389 bp = netdev_priv(dev);
a2fbb9ea 13390
72fd0718
VZ
13391 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13392 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13393 return -EAGAIN;
13394 }
13395
34f80b04
EG
13396 rtnl_lock();
13397
228241eb 13398 pci_restore_state(pdev);
34f80b04
EG
13399
13400 if (!netif_running(dev)) {
13401 rtnl_unlock();
13402 return 0;
13403 }
13404
a2fbb9ea
ET
13405 bnx2x_set_power_state(bp, PCI_D0);
13406 netif_device_attach(dev);
13407
da5a662a 13408 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 13409
34f80b04
EG
13410 rtnl_unlock();
13411
13412 return rc;
a2fbb9ea
ET
13413}
13414
f8ef6e44
YG
13415static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13416{
13417 int i;
13418
13419 bp->state = BNX2X_STATE_ERROR;
13420
13421 bp->rx_mode = BNX2X_RX_MODE_NONE;
13422
13423 bnx2x_netif_stop(bp, 0);
13424
13425 del_timer_sync(&bp->timer);
13426 bp->stats_state = STATS_STATE_DISABLED;
13427 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13428
13429 /* Release IRQs */
6cbe5065 13430 bnx2x_free_irq(bp, false);
f8ef6e44
YG
13431
13432 if (CHIP_IS_E1(bp)) {
13433 struct mac_configuration_cmd *config =
13434 bnx2x_sp(bp, mcast_config);
13435
8d9c5f34 13436 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
13437 CAM_INVALIDATE(config->config_table[i]);
13438 }
13439
13440 /* Free SKBs, SGEs, TPA pool and driver internals */
13441 bnx2x_free_skbs(bp);
54b9ddaa 13442 for_each_queue(bp, i)
f8ef6e44 13443 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 13444 for_each_queue(bp, i)
7cde1c8b 13445 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
13446 bnx2x_free_mem(bp);
13447
13448 bp->state = BNX2X_STATE_CLOSED;
13449
13450 netif_carrier_off(bp->dev);
13451
13452 return 0;
13453}
13454
13455static void bnx2x_eeh_recover(struct bnx2x *bp)
13456{
13457 u32 val;
13458
13459 mutex_init(&bp->port.phy_mutex);
13460
13461 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13462 bp->link_params.shmem_base = bp->common.shmem_base;
13463 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13464
13465 if (!bp->common.shmem_base ||
13466 (bp->common.shmem_base < 0xA0000) ||
13467 (bp->common.shmem_base >= 0xC0000)) {
13468 BNX2X_DEV_INFO("MCP not active\n");
13469 bp->flags |= NO_MCP_FLAG;
13470 return;
13471 }
13472
13473 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13474 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13475 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13476 BNX2X_ERR("BAD MCP validity signature\n");
13477
13478 if (!BP_NOMCP(bp)) {
13479 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13480 & DRV_MSG_SEQ_NUMBER_MASK);
13481 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13482 }
13483}
13484
493adb1f
WX
13485/**
13486 * bnx2x_io_error_detected - called when PCI error is detected
13487 * @pdev: Pointer to PCI device
13488 * @state: The current pci connection state
13489 *
13490 * This function is called after a PCI bus error affecting
13491 * this device has been detected.
13492 */
13493static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13494 pci_channel_state_t state)
13495{
13496 struct net_device *dev = pci_get_drvdata(pdev);
13497 struct bnx2x *bp = netdev_priv(dev);
13498
13499 rtnl_lock();
13500
13501 netif_device_detach(dev);
13502
07ce50e4
DN
13503 if (state == pci_channel_io_perm_failure) {
13504 rtnl_unlock();
13505 return PCI_ERS_RESULT_DISCONNECT;
13506 }
13507
493adb1f 13508 if (netif_running(dev))
f8ef6e44 13509 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
13510
13511 pci_disable_device(pdev);
13512
13513 rtnl_unlock();
13514
13515 /* Request a slot reset */
13516 return PCI_ERS_RESULT_NEED_RESET;
13517}
13518
13519/**
13520 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13521 * @pdev: Pointer to PCI device
13522 *
13523 * Restart the card from scratch, as if from a cold-boot.
13524 */
13525static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13526{
13527 struct net_device *dev = pci_get_drvdata(pdev);
13528 struct bnx2x *bp = netdev_priv(dev);
13529
13530 rtnl_lock();
13531
13532 if (pci_enable_device(pdev)) {
13533 dev_err(&pdev->dev,
13534 "Cannot re-enable PCI device after reset\n");
13535 rtnl_unlock();
13536 return PCI_ERS_RESULT_DISCONNECT;
13537 }
13538
13539 pci_set_master(pdev);
13540 pci_restore_state(pdev);
13541
13542 if (netif_running(dev))
13543 bnx2x_set_power_state(bp, PCI_D0);
13544
13545 rtnl_unlock();
13546
13547 return PCI_ERS_RESULT_RECOVERED;
13548}
13549
13550/**
13551 * bnx2x_io_resume - called when traffic can start flowing again
13552 * @pdev: Pointer to PCI device
13553 *
13554 * This callback is called when the error recovery driver tells us that
13555 * its OK to resume normal operation.
13556 */
13557static void bnx2x_io_resume(struct pci_dev *pdev)
13558{
13559 struct net_device *dev = pci_get_drvdata(pdev);
13560 struct bnx2x *bp = netdev_priv(dev);
13561
72fd0718
VZ
13562 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13563 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13564 return;
13565 }
13566
493adb1f
WX
13567 rtnl_lock();
13568
f8ef6e44
YG
13569 bnx2x_eeh_recover(bp);
13570
493adb1f 13571 if (netif_running(dev))
f8ef6e44 13572 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
13573
13574 netif_device_attach(dev);
13575
13576 rtnl_unlock();
13577}
13578
13579static struct pci_error_handlers bnx2x_err_handler = {
13580 .error_detected = bnx2x_io_error_detected,
356e2385
EG
13581 .slot_reset = bnx2x_io_slot_reset,
13582 .resume = bnx2x_io_resume,
493adb1f
WX
13583};
13584
a2fbb9ea 13585static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
13586 .name = DRV_MODULE_NAME,
13587 .id_table = bnx2x_pci_tbl,
13588 .probe = bnx2x_init_one,
13589 .remove = __devexit_p(bnx2x_remove_one),
13590 .suspend = bnx2x_suspend,
13591 .resume = bnx2x_resume,
13592 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
13593};
13594
13595static int __init bnx2x_init(void)
13596{
dd21ca6d
SG
13597 int ret;
13598
7995c64e 13599 pr_info("%s", version);
938cf541 13600
1cf167f2
EG
13601 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13602 if (bnx2x_wq == NULL) {
7995c64e 13603 pr_err("Cannot create workqueue\n");
1cf167f2
EG
13604 return -ENOMEM;
13605 }
13606
dd21ca6d
SG
13607 ret = pci_register_driver(&bnx2x_pci_driver);
13608 if (ret) {
7995c64e 13609 pr_err("Cannot register driver\n");
dd21ca6d
SG
13610 destroy_workqueue(bnx2x_wq);
13611 }
13612 return ret;
a2fbb9ea
ET
13613}
13614
13615static void __exit bnx2x_cleanup(void)
13616{
13617 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
13618
13619 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
13620}
13621
13622module_init(bnx2x_init);
13623module_exit(bnx2x_cleanup);
13624
993ac7b5
MC
13625#ifdef BCM_CNIC
13626
13627/* count denotes the number of new completions we have seen */
13628static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13629{
13630 struct eth_spe *spe;
13631
13632#ifdef BNX2X_STOP_ON_ERROR
13633 if (unlikely(bp->panic))
13634 return;
13635#endif
13636
13637 spin_lock_bh(&bp->spq_lock);
13638 bp->cnic_spq_pending -= count;
13639
13640 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13641 bp->cnic_spq_pending++) {
13642
13643 if (!bp->cnic_kwq_pending)
13644 break;
13645
13646 spe = bnx2x_sp_get_next(bp);
13647 *spe = *bp->cnic_kwq_cons;
13648
13649 bp->cnic_kwq_pending--;
13650
13651 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13652 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13653
13654 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13655 bp->cnic_kwq_cons = bp->cnic_kwq;
13656 else
13657 bp->cnic_kwq_cons++;
13658 }
13659 bnx2x_sp_prod_update(bp);
13660 spin_unlock_bh(&bp->spq_lock);
13661}
13662
13663static int bnx2x_cnic_sp_queue(struct net_device *dev,
13664 struct kwqe_16 *kwqes[], u32 count)
13665{
13666 struct bnx2x *bp = netdev_priv(dev);
13667 int i;
13668
13669#ifdef BNX2X_STOP_ON_ERROR
13670 if (unlikely(bp->panic))
13671 return -EIO;
13672#endif
13673
13674 spin_lock_bh(&bp->spq_lock);
13675
13676 for (i = 0; i < count; i++) {
13677 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13678
13679 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13680 break;
13681
13682 *bp->cnic_kwq_prod = *spe;
13683
13684 bp->cnic_kwq_pending++;
13685
13686 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13687 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13688 spe->data.mac_config_addr.hi,
13689 spe->data.mac_config_addr.lo,
13690 bp->cnic_kwq_pending);
13691
13692 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13693 bp->cnic_kwq_prod = bp->cnic_kwq;
13694 else
13695 bp->cnic_kwq_prod++;
13696 }
13697
13698 spin_unlock_bh(&bp->spq_lock);
13699
13700 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13701 bnx2x_cnic_sp_post(bp, 0);
13702
13703 return i;
13704}
13705
13706static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13707{
13708 struct cnic_ops *c_ops;
13709 int rc = 0;
13710
13711 mutex_lock(&bp->cnic_mutex);
13712 c_ops = bp->cnic_ops;
13713 if (c_ops)
13714 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13715 mutex_unlock(&bp->cnic_mutex);
13716
13717 return rc;
13718}
13719
13720static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13721{
13722 struct cnic_ops *c_ops;
13723 int rc = 0;
13724
13725 rcu_read_lock();
13726 c_ops = rcu_dereference(bp->cnic_ops);
13727 if (c_ops)
13728 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13729 rcu_read_unlock();
13730
13731 return rc;
13732}
13733
13734/*
13735 * for commands that have no data
13736 */
13737static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13738{
13739 struct cnic_ctl_info ctl = {0};
13740
13741 ctl.cmd = cmd;
13742
13743 return bnx2x_cnic_ctl_send(bp, &ctl);
13744}
13745
13746static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13747{
13748 struct cnic_ctl_info ctl;
13749
13750 /* first we tell CNIC and only then we count this as a completion */
13751 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13752 ctl.data.comp.cid = cid;
13753
13754 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13755 bnx2x_cnic_sp_post(bp, 1);
13756}
13757
13758static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13759{
13760 struct bnx2x *bp = netdev_priv(dev);
13761 int rc = 0;
13762
13763 switch (ctl->cmd) {
13764 case DRV_CTL_CTXTBL_WR_CMD: {
13765 u32 index = ctl->data.io.offset;
13766 dma_addr_t addr = ctl->data.io.dma_addr;
13767
13768 bnx2x_ilt_wr(bp, index, addr);
13769 break;
13770 }
13771
13772 case DRV_CTL_COMPLETION_CMD: {
13773 int count = ctl->data.comp.comp_count;
13774
13775 bnx2x_cnic_sp_post(bp, count);
13776 break;
13777 }
13778
13779 /* rtnl_lock is held. */
13780 case DRV_CTL_START_L2_CMD: {
13781 u32 cli = ctl->data.ring.client_id;
13782
13783 bp->rx_mode_cl_mask |= (1 << cli);
13784 bnx2x_set_storm_rx_mode(bp);
13785 break;
13786 }
13787
13788 /* rtnl_lock is held. */
13789 case DRV_CTL_STOP_L2_CMD: {
13790 u32 cli = ctl->data.ring.client_id;
13791
13792 bp->rx_mode_cl_mask &= ~(1 << cli);
13793 bnx2x_set_storm_rx_mode(bp);
13794 break;
13795 }
13796
13797 default:
13798 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13799 rc = -EINVAL;
13800 }
13801
13802 return rc;
13803}
13804
13805static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13806{
13807 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13808
13809 if (bp->flags & USING_MSIX_FLAG) {
13810 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13811 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13812 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13813 } else {
13814 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13815 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13816 }
13817 cp->irq_arr[0].status_blk = bp->cnic_sb;
13818 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13819 cp->irq_arr[1].status_blk = bp->def_status_blk;
13820 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13821
13822 cp->num_irq = 2;
13823}
13824
13825static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13826 void *data)
13827{
13828 struct bnx2x *bp = netdev_priv(dev);
13829 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13830
13831 if (ops == NULL)
13832 return -EINVAL;
13833
13834 if (atomic_read(&bp->intr_sem) != 0)
13835 return -EBUSY;
13836
13837 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13838 if (!bp->cnic_kwq)
13839 return -ENOMEM;
13840
13841 bp->cnic_kwq_cons = bp->cnic_kwq;
13842 bp->cnic_kwq_prod = bp->cnic_kwq;
13843 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13844
13845 bp->cnic_spq_pending = 0;
13846 bp->cnic_kwq_pending = 0;
13847
13848 bp->cnic_data = data;
13849
13850 cp->num_irq = 0;
13851 cp->drv_state = CNIC_DRV_STATE_REGD;
13852
13853 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13854
13855 bnx2x_setup_cnic_irq_info(bp);
13856 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13857 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13858 rcu_assign_pointer(bp->cnic_ops, ops);
13859
13860 return 0;
13861}
13862
13863static int bnx2x_unregister_cnic(struct net_device *dev)
13864{
13865 struct bnx2x *bp = netdev_priv(dev);
13866 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13867
13868 mutex_lock(&bp->cnic_mutex);
13869 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13870 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13871 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13872 }
13873 cp->drv_state = 0;
13874 rcu_assign_pointer(bp->cnic_ops, NULL);
13875 mutex_unlock(&bp->cnic_mutex);
13876 synchronize_rcu();
13877 kfree(bp->cnic_kwq);
13878 bp->cnic_kwq = NULL;
13879
13880 return 0;
13881}
13882
13883struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13884{
13885 struct bnx2x *bp = netdev_priv(dev);
13886 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13887
13888 cp->drv_owner = THIS_MODULE;
13889 cp->chip_id = CHIP_ID(bp);
13890 cp->pdev = bp->pdev;
13891 cp->io_base = bp->regview;
13892 cp->io_base2 = bp->doorbells;
13893 cp->max_kwqe_pending = 8;
13894 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13895 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13896 cp->ctx_tbl_len = CNIC_ILT_LINES;
13897 cp->starting_cid = BCM_CNIC_CID_START;
13898 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13899 cp->drv_ctl = bnx2x_drv_ctl;
13900 cp->drv_register_cnic = bnx2x_register_cnic;
13901 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13902
13903 return cp;
13904}
13905EXPORT_SYMBOL(bnx2x_cnic_probe);
13906
13907#endif /* BCM_CNIC */
94a78b79 13908