/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.53-1"
#define DRV_MODULE_RELDATE	"2010/18/04"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

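/*
 * Indirect register access: before the BAR mapping and the DMAE engine are
 * usable, registers are reached through the PCICFG_GRC_ADDRESS /
 * PCICFG_GRC_DATA window in PCI config space.  The trailing write parks
 * the window back at the vendor-ID offset when the access is done.
 */
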
/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
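
/*
 * A DMAE transaction is posted by copying the command block word by word
 * into the engine's command memory and then writing 1 to the per-channel
 * GO register.  The engine signals completion by writing comp_val to
 * comp_addr, which bnx2x_write_dmae()/bnx2x_read_dmae() below poll for.
 */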

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
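
/*
 * DMAE lengths are counted in 32-bit words and bounded by
 * DMAE_LEN32_WR_MAX, so long writes above are issued in chunks: len is
 * decremented in words while offset advances in bytes (words * 4).
 * As a hypothetical example, with a limit of 0x400 words a 0x500-word
 * copy goes out as one 0x400-word transaction followed by one of 0x100.
 */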

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
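
/*
 * Each of the four STORM processors (X/T/C/U) keeps an assert list in its
 * internal memory: a last-index byte plus fixed-size entries of four
 * dwords each.  Scanning above stops at the first entry whose opcode
 * reads as COMMON_ASM_INVALID_ASSERT_OPCODE; the return value is the
 * number of asserts found.
 */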

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
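
/*
 * The MCP logs firmware output into a scratchpad area that wraps like a
 * ring buffer: "mark" points into the middle of it, so the dump above
 * prints from mark to the end of the area first and then from the start
 * of the area back up to mark, recovering the messages in order.
 */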

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x) *sb_c_idx(0x%x)"
			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
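
/*
 * Quiescing discipline: bumping bp->intr_sem makes every ISR bail out
 * early, bnx2x_int_disable() stops the HC from raising new interrupts,
 * synchronize_irq() waits for handlers already in flight, and flushing
 * bnx2x_wq catches the slowpath task.  After this returns, no driver
 * interrupt code is running or can start.
 */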

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		/* a bool is expected here: -EINVAL would read as "acquired" */
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
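
/*
 * The HW lock lives in per-function MISC_REG_DRIVER_CONTROL registers:
 * writing the resource bit to the companion register at base + 4
 * requests the lock, and reading the base register back shows whether
 * the hardware actually granted it, so the trylock is a single
 * write-then-read with no spinning.
 */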

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
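
/*
 * A transmitted packet occupies a chain of BDs: the start BD (unmapped
 * above with dma_unmap_single), an always-present parse BD, an optional
 * TSO split-header BD (neither of which carries a mapping), and then one
 * data BD per fragment.  The nbd count taken from the start BD is used to
 * advance the consumer past the whole chain to the next packet's first BD.
 */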

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
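
/*
 * Ring accounting example: the producer and consumer are free-running
 * u16 indices (SUB_S16 handles wrap-around), and the NUM_TX_RINGS
 * "next page" BDs are treated as permanently used.  With prod == cons
 * (an empty ring) this returns tx_ring_size - NUM_TX_RINGS, i.e. one BD
 * per ring page is always reserved for the next-page pointer.
 */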

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
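
/*
 * Ramrods are slowpath commands sent to the microcode; their completions
 * come back on the RCQ and are routed here from bnx2x_rx_int().  Each
 * completion advances either the per-queue fp->state or the global
 * bp->state machine, and the mb() lets bnx2x_wait_ramrod(), which polls
 * those fields, observe the change.
 */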

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
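
/*
 * The SGE ring is tracked with a bitmask, one bit per entry (grouped into
 * 64-bit mask elements): bits are cleared as the FW consumes pages, and
 * the producer is only advanced over mask elements that are completely
 * exhausted, re-arming them to all ones.  The two bits at the end of
 * every ring page stay cleared because those slots hold the "next page"
 * pointer rather than a buffer.
 */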

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
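
/*
 * TPA flow: on TPA_START the RX buffer at "cons" is parked in
 * tpa_pool[queue] (still mapped) while a spare skb from the pool takes
 * its place on the ring; the HW then aggregates further segments into
 * SGE pages.  The matching TPA_END path (bnx2x_tpa_stop() below) unmaps
 * the parked skb, fixes up the IP checksum, attaches the SGE pages as
 * frags via bnx2x_fill_frag_skb() and hands the large skb to the stack.
 */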

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1781
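/*
 * Illustrative sketch only (not part of this driver): a NAPI poll
 * handler could drive the RX loop above roughly like this, assuming
 * the bnx2x_rx_int(fp, budget) signature whose body ends here. The
 * name "my_poll" is hypothetical; the real hookup lives elsewhere in
 * this file.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct bnx2x_fastpath *fp =
 *			container_of(napi, struct bnx2x_fastpath, napi);
 *		int work_done = bnx2x_rx_int(fp, budget);
 *
 *		if (work_done < budget)
 *			napi_complete(napi);
 *		return work_done;
 *	}
 */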
1782static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1783{
1784 struct bnx2x_fastpath *fp = fp_cookie;
1785 struct bnx2x *bp = fp->bp;
a2fbb9ea 1786
da5a662a
VZ
1787 /* Return here if interrupt is disabled */
1788 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1789 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1790 return IRQ_HANDLED;
1791 }
1792
34f80b04 1793 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1794 fp->index, fp->sb_id);
0626b899 1795 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1796
1797#ifdef BNX2X_STOP_ON_ERROR
1798 if (unlikely(bp->panic))
1799 return IRQ_HANDLED;
1800#endif
ca00392c 1801
54b9ddaa
VZ
1802 /* Handle Rx and Tx according to MSI-X vector */
1803 prefetch(fp->rx_cons_sb);
1804 prefetch(fp->tx_cons_sb);
1805 prefetch(&fp->status_blk->u_status_block.status_block_index);
1806 prefetch(&fp->status_blk->c_status_block.status_block_index);
1807 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1808
a2fbb9ea
ET
1809 return IRQ_HANDLED;
1810}
1811
1812static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1813{
555f6c78 1814 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1815 u16 status = bnx2x_ack_int(bp);
34f80b04 1816 u16 mask;
ca00392c 1817 int i;
a2fbb9ea 1818
34f80b04 1819 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1820 if (unlikely(status == 0)) {
1821 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1822 return IRQ_NONE;
1823 }
f5372251 1824 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1825
34f80b04 1826 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1827 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1828 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1829 return IRQ_HANDLED;
1830 }
1831
3196a88a
EG
1832#ifdef BNX2X_STOP_ON_ERROR
1833 if (unlikely(bp->panic))
1834 return IRQ_HANDLED;
1835#endif
1836
ca00392c
EG
1837 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1838 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1839
ca00392c
EG
1840 mask = 0x2 << fp->sb_id;
1841 if (status & mask) {
54b9ddaa
VZ
1842 /* Handle Rx and Tx according to SB id */
1843 prefetch(fp->rx_cons_sb);
1844 prefetch(&fp->status_blk->u_status_block.
1845 status_block_index);
1846 prefetch(fp->tx_cons_sb);
1847 prefetch(&fp->status_blk->c_status_block.
1848 status_block_index);
1849 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1850 status &= ~mask;
1851 }
a2fbb9ea
ET
1852 }
1853
993ac7b5
MC
1854#ifdef BCM_CNIC
1855 mask = 0x2 << CNIC_SB_ID(bp);
1856 if (status & (mask | 0x1)) {
1857 struct cnic_ops *c_ops = NULL;
1858
1859 rcu_read_lock();
1860 c_ops = rcu_dereference(bp->cnic_ops);
1861 if (c_ops)
1862 c_ops->cnic_handler(bp->cnic_data, NULL);
1863 rcu_read_unlock();
1864
1865 status &= ~mask;
1866 }
1867#endif
a2fbb9ea 1868
34f80b04 1869 if (unlikely(status & 0x1)) {
1cf167f2 1870 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1871
1872 status &= ~0x1;
1873 if (!status)
1874 return IRQ_HANDLED;
1875 }
1876
cdaa7cb8
VZ
1877 if (unlikely(status))
1878 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1879 status);
a2fbb9ea 1880
c18487ee 1881 return IRQ_HANDLED;
a2fbb9ea
ET
1882}
1883
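/*
 * Worked example of the status decoding above (illustrative values):
 * bnx2x_ack_int() hands back one bit per status block, with bit 0x1
 * reserved for the default (slow path) block and fastpath SB i mapped
 * to mask = 0x2 << sb_id. A status of 0x0005 therefore means "slow
 * path work pending plus the fastpath queue whose sb_id is 1"
 * (0x1 | (0x2 << 1)).
 */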
c18487ee 1884/* end of fast path */
a2fbb9ea 1885
bb2a0f7a 1886static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1887
c18487ee
YR
1888/* Link */
1889
1890/*
1891 * General service functions
1892 */
a2fbb9ea 1893
4a37fb66 1894static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1895{
1896 u32 lock_status;
1897 u32 resource_bit = (1 << resource);
4a37fb66
YG
1898 int func = BP_FUNC(bp);
1899 u32 hw_lock_control_reg;
c18487ee 1900 int cnt;
a2fbb9ea 1901
c18487ee
YR
1902 /* Validating that the resource is within range */
1903 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1904 DP(NETIF_MSG_HW,
1905 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1906 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1907 return -EINVAL;
1908 }
a2fbb9ea 1909
4a37fb66
YG
1910 if (func <= 5) {
1911 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1912 } else {
1913 hw_lock_control_reg =
1914 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1915 }
1916
c18487ee 1917 /* Validating that the resource is not already taken */
4a37fb66 1918 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1919 if (lock_status & resource_bit) {
1920 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1921 lock_status, resource_bit);
1922 return -EEXIST;
1923 }
a2fbb9ea 1924
46230476
EG
1925 /* Try for 5 seconds every 5ms */
1926 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1927 /* Try to acquire the lock */
4a37fb66
YG
1928 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1929 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1930 if (lock_status & resource_bit)
1931 return 0;
a2fbb9ea 1932
c18487ee 1933 msleep(5);
a2fbb9ea 1934 }
c18487ee
YR
1935 DP(NETIF_MSG_HW, "Timeout\n");
1936 return -EAGAIN;
1937}
a2fbb9ea 1938
4a37fb66 1939static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1940{
1941 u32 lock_status;
1942 u32 resource_bit = (1 << resource);
4a37fb66
YG
1943 int func = BP_FUNC(bp);
1944 u32 hw_lock_control_reg;
a2fbb9ea 1945
72fd0718
VZ
1946 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1947
c18487ee
YR
1948 /* Validating that the resource is within range */
1949 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1950 DP(NETIF_MSG_HW,
1951 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1952 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1953 return -EINVAL;
1954 }
1955
4a37fb66
YG
1956 if (func <= 5) {
1957 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1958 } else {
1959 hw_lock_control_reg =
1960 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1961 }
1962
c18487ee 1963 /* Validating that the resource is currently taken */
4a37fb66 1964 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1965 if (!(lock_status & resource_bit)) {
1966 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1967 lock_status, resource_bit);
1968 return -EFAULT;
a2fbb9ea
ET
1969 }
1970
4a37fb66 1971 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1972 return 0;
1973}
1974
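/*
 * Usage sketch for the two helpers above (hedged example, not a quote
 * from this file): callers bracket accesses to a shared resource and
 * must be prepared for -EINVAL, -EEXIST or -EAGAIN from the acquire.
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	if (rc)
 *		return rc;
 *	... touch the shared registers ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */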
1975/* HW Lock for shared dual port PHYs */
4a37fb66 1976static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1977{
34f80b04 1978 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1979
46c6a674
EG
1980 if (bp->port.need_hw_lock)
1981 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1982}
a2fbb9ea 1983
4a37fb66 1984static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1985{
46c6a674
EG
1986 if (bp->port.need_hw_lock)
1987 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1988
34f80b04 1989 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1990}
a2fbb9ea 1991
4acac6a5
EG
1992int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1993{
1994 /* The GPIO should be swapped if swap register is set and active */
1995 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1996 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1997 int gpio_shift = gpio_num +
1998 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1999 u32 gpio_mask = (1 << gpio_shift);
2000 u32 gpio_reg;
2001 int value;
2002
2003 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2004 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2005 return -EINVAL;
2006 }
2007
2008 /* read GPIO value */
2009 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2010
2011 /* get the requested pin value */
2012 if ((gpio_reg & gpio_mask) == gpio_mask)
2013 value = 1;
2014 else
2015 value = 0;
2016
2017 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2018
2019 return value;
2020}
2021
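/*
 * Worked example of the shift math above (hypothetical strap values):
 * if both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE read non-zero
 * on port 0, gpio_port becomes 1 ^ 0 = 1, so gpio_shift = gpio_num +
 * MISC_REGISTERS_GPIO_PORT_SHIFT and gpio_mask then selects the other
 * port's copy of the pin in MISC_REG_GPIO.
 */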
17de50b7 2022int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2023{
2024 /* The GPIO should be swapped if swap register is set and active */
2025 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2026 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2027 int gpio_shift = gpio_num +
2028 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2029 u32 gpio_mask = (1 << gpio_shift);
2030 u32 gpio_reg;
a2fbb9ea 2031
c18487ee
YR
2032 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2034 return -EINVAL;
2035 }
a2fbb9ea 2036
4a37fb66 2037 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2038 /* read GPIO and mask except the float bits */
2039 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2040
c18487ee
YR
2041 switch (mode) {
2042 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2043 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2044 gpio_num, gpio_shift);
2045 /* clear FLOAT and set CLR */
2046 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2048 break;
a2fbb9ea 2049
c18487ee
YR
2050 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2051 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2052 gpio_num, gpio_shift);
2053 /* clear FLOAT and set SET */
2054 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2056 break;
a2fbb9ea 2057
17de50b7 2058 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2059 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2060 gpio_num, gpio_shift);
2061 /* set FLOAT */
2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2063 break;
a2fbb9ea 2064
c18487ee
YR
2065 default:
2066 break;
a2fbb9ea
ET
2067 }
2068
c18487ee 2069 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2071
c18487ee 2072 return 0;
a2fbb9ea
ET
2073}
2074
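/*
 * Usage sketch for bnx2x_set_gpio() (mirrors the fan-failure handling
 * further down in this file): drive GPIO 1 of the given port low,
 * which on some boards asserts the external PHY reset.
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */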
4acac6a5
EG
2075int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2076{
2077 /* The GPIO should be swapped if swap register is set and active */
2078 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080 int gpio_shift = gpio_num +
2081 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082 u32 gpio_mask = (1 << gpio_shift);
2083 u32 gpio_reg;
2084
2085 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2086 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2087 return -EINVAL;
2088 }
2089
2090 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2091 /* read GPIO int */
2092 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2093
2094 switch (mode) {
2095 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2096 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2097 "output low\n", gpio_num, gpio_shift);
2098 /* clear SET and set CLR */
2099 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2100 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2101 break;
2102
2103 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2104 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2105 "output high\n", gpio_num, gpio_shift);
2106 /* clear CLR and set SET */
2107 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2109 break;
2110
2111 default:
2112 break;
2113 }
2114
2115 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2116 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2117
2118 return 0;
2119}
2120
c18487ee 2121static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2122{
c18487ee
YR
2123 u32 spio_mask = (1 << spio_num);
2124 u32 spio_reg;
a2fbb9ea 2125
c18487ee
YR
2126 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2127 (spio_num > MISC_REGISTERS_SPIO_7)) {
2128 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2129 return -EINVAL;
a2fbb9ea
ET
2130 }
2131
4a37fb66 2132 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2133 /* read SPIO and mask except the float bits */
2134 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2135
c18487ee 2136 switch (mode) {
6378c025 2137 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2138 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2139 /* clear FLOAT and set CLR */
2140 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2142 break;
a2fbb9ea 2143
6378c025 2144 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2145 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2146 /* clear FLOAT and set SET */
2147 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2149 break;
a2fbb9ea 2150
c18487ee
YR
2151 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2152 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2153 /* set FLOAT */
2154 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2155 break;
a2fbb9ea 2156
c18487ee
YR
2157 default:
2158 break;
a2fbb9ea
ET
2159 }
2160
c18487ee 2161 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2163
a2fbb9ea
ET
2164 return 0;
2165}
2166
c18487ee 2167static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2168{
ad33ea3a
EG
2169 switch (bp->link_vars.ieee_fc &
2170 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2171 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2172 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2173 ADVERTISED_Pause);
2174 break;
356e2385 2175
c18487ee 2176 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2177 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2178 ADVERTISED_Pause);
2179 break;
356e2385 2180
c18487ee 2181 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2182 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2183 break;
356e2385 2184
c18487ee 2185 default:
34f80b04 2186 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2187 ADVERTISED_Pause);
2188 break;
2189 }
2190}
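/*
 * Summary of the mapping performed above (standard IEEE pause
 * advertisement resolution): _PAUSE_BOTH advertises both
 * ADVERTISED_Pause and ADVERTISED_Asym_Pause, _PAUSE_ASYMMETRIC
 * advertises only Asym_Pause, and _PAUSE_NONE (or any other value)
 * clears both bits.
 */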
f1410647 2191
c18487ee
YR
2192static void bnx2x_link_report(struct bnx2x *bp)
2193{
f34d28ea 2194 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2195 netif_carrier_off(bp->dev);
7995c64e 2196 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2197 return;
2198 }
2199
c18487ee 2200 if (bp->link_vars.link_up) {
35c5f8fe
EG
2201 u16 line_speed;
2202
c18487ee
YR
2203 if (bp->state == BNX2X_STATE_OPEN)
2204 netif_carrier_on(bp->dev);
7995c64e 2205 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2206
35c5f8fe
EG
2207 line_speed = bp->link_vars.line_speed;
2208 if (IS_E1HMF(bp)) {
2209 u16 vn_max_rate;
2210
2211 vn_max_rate =
2212 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2213 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2214 if (vn_max_rate < line_speed)
2215 line_speed = vn_max_rate;
2216 }
7995c64e 2217 pr_cont("%d Mbps ", line_speed);
f1410647 2218
c18487ee 2219 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2220 pr_cont("full duplex");
c18487ee 2221 else
7995c64e 2222 pr_cont("half duplex");
f1410647 2223
c0700f90
DM
2224 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2225 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2226 pr_cont(", receive ");
356e2385
EG
2227 if (bp->link_vars.flow_ctrl &
2228 BNX2X_FLOW_CTRL_TX)
7995c64e 2229 pr_cont("& transmit ");
c18487ee 2230 } else {
7995c64e 2231 pr_cont(", transmit ");
c18487ee 2232 }
7995c64e 2233 pr_cont("flow control ON");
c18487ee 2234 }
7995c64e 2235 pr_cont("\n");
f1410647 2236
c18487ee
YR
2237 } else { /* link_down */
2238 netif_carrier_off(bp->dev);
7995c64e 2239 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2240 }
c18487ee
YR
2241}
2242
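/*
 * Worked example for the E1HMF clamp above (hypothetical numbers): if
 * the FUNC_MF_CFG_MAX_BW field of mf_config holds 25, vn_max_rate =
 * 25 * 100 = 2500 Mbps, so a 10000 Mbps physical link is reported as
 * 2500 Mbps for this function.
 */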
b5bf9068 2243static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2244{
19680c48
EG
2245 if (!BP_NOMCP(bp)) {
2246 u8 rc;
a2fbb9ea 2247
19680c48 2248 /* Initialize link parameters structure variables */
8c99e7b0
YR
2249 /* It is recommended to turn off RX FC for jumbo frames
2250 for better performance */
0c593270 2251 if (bp->dev->mtu > 5000)
c0700f90 2252 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2253 else
c0700f90 2254 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2255
4a37fb66 2256 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2257
2258 if (load_mode == LOAD_DIAG)
2259 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2260
19680c48 2261 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2262
4a37fb66 2263 bnx2x_release_phy_lock(bp);
a2fbb9ea 2264
3c96c68b
EG
2265 bnx2x_calc_fc_adv(bp);
2266
b5bf9068
EG
2267 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2268 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2269 bnx2x_link_report(bp);
b5bf9068 2270 }
34f80b04 2271
19680c48
EG
2272 return rc;
2273 }
f5372251 2274 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2275 return -EINVAL;
a2fbb9ea
ET
2276}
2277
c18487ee 2278static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2279{
19680c48 2280 if (!BP_NOMCP(bp)) {
4a37fb66 2281 bnx2x_acquire_phy_lock(bp);
19680c48 2282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2283 bnx2x_release_phy_lock(bp);
a2fbb9ea 2284
19680c48
EG
2285 bnx2x_calc_fc_adv(bp);
2286 } else
f5372251 2287 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2288}
a2fbb9ea 2289
c18487ee
YR
2290static void bnx2x__link_reset(struct bnx2x *bp)
2291{
19680c48 2292 if (!BP_NOMCP(bp)) {
4a37fb66 2293 bnx2x_acquire_phy_lock(bp);
589abe3a 2294 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2295 bnx2x_release_phy_lock(bp);
19680c48 2296 } else
f5372251 2297 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2298}
a2fbb9ea 2299
c18487ee
YR
2300static u8 bnx2x_link_test(struct bnx2x *bp)
2301{
2145a920 2302 u8 rc = 0;
a2fbb9ea 2303
2145a920
VZ
2304 if (!BP_NOMCP(bp)) {
2305 bnx2x_acquire_phy_lock(bp);
2306 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307 bnx2x_release_phy_lock(bp);
2308 } else
2309 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 2310
c18487ee
YR
2311 return rc;
2312}
a2fbb9ea 2313
8a1c38d1 2314static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2315{
8a1c38d1
EG
2316 u32 r_param = bp->link_vars.line_speed / 8;
2317 u32 fair_periodic_timeout_usec;
2318 u32 t_fair;
34f80b04 2319
8a1c38d1
EG
2320 memset(&(bp->cmng.rs_vars), 0,
2321 sizeof(struct rate_shaping_vars_per_port));
2322 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2323
8a1c38d1
EG
2324 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2326
8a1c38d1
EG
2327 /* this is the threshold below which no timer arming will occur;
2328 the 1.25 coefficient makes the threshold a little bigger
2329 than the real time, to compensate for timer inaccuracy */
2330 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2331 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2332
8a1c38d1
EG
2333 /* resolution of fairness timer */
2334 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2336 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2337
8a1c38d1
EG
2338 /* this is the threshold below which we won't arm the timer anymore */
2339 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2340
8a1c38d1
EG
2341 /* we multiply by 1e3/8 to get bytes/msec.
2342 We don't want the credits to exceed a credit of
2343 t_fair*FAIR_MEM (the algorithm resolution) */
2344 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345 /* since each tick is 4 usec */
2346 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2347}
2348
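/*
 * Worked example for the arithmetic above, assuming a 10000 Mbps link
 * and the 100 usec rate-shaping period mentioned earlier (illustrative
 * only): r_param = 10000 / 8 = 1250 bytes/usec, so rs_threshold =
 * (100 * 1250 * 5) / 4 = 156250 bytes, i.e. 1.25 times the bytes of
 * one period, and t_fair = T_FAIR_COEF / 10000 yields the 1000 usec
 * fairness period noted in the comment above.
 */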
2691d51d
EG
2349/* Calculates the sum of vn_min_rates.
2350 It is needed for further normalization of the min_rates.
2351 Returns:
2352 sum of vn_min_rates.
2353 or
2354 0 - if all the min_rates are 0.
2355 In the latter case the fairness algorithm should be deactivated.
2356 If not all min_rates are zero then those that are zero will be set to 1.
2357 */
2358static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2359{
2360 int all_zero = 1;
2361 int port = BP_PORT(bp);
2362 int vn;
2363
2364 bp->vn_weight_sum = 0;
2365 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366 int func = 2*vn + port;
2367 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2370
2371 /* Skip hidden vns */
2372 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2373 continue;
2374
2375 /* If min rate is zero - set it to 1 */
2376 if (!vn_min_rate)
2377 vn_min_rate = DEF_MIN_RATE;
2378 else
2379 all_zero = 0;
2380
2381 bp->vn_weight_sum += vn_min_rate;
2382 }
2383
2384 /* ... only if all min rates are zero - disable fairness */
b015e3d1
EG
2385 if (all_zero) {
2386 bp->cmng.flags.cmng_enables &=
2387 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2388 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2389 " fairness will be disabled\n");
2390 } else
2391 bp->cmng.flags.cmng_enables |=
2392 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2393}
2394
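/*
 * Illustrative example for the loop above (hypothetical MIN_BW
 * fields): with four vns configured as {0, 40, 0, 60}, the two zero
 * entries are counted as DEF_MIN_RATE so they keep a non-zero weight,
 * all_zero ends up 0 and fairness stays enabled; only if every
 * non-hidden vn had MIN_BW = 0 would the fairness flag be cleared.
 */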
8a1c38d1 2395static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2396{
2397 struct rate_shaping_vars_per_vn m_rs_vn;
2398 struct fairness_vars_per_vn m_fair_vn;
2399 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400 u16 vn_min_rate, vn_max_rate;
2401 int i;
2402
2403 /* If function is hidden - set min and max to zeroes */
2404 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2405 vn_min_rate = 0;
2406 vn_max_rate = 0;
2407
2408 } else {
2409 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2411 /* If min rate is zero - set it to 1 */
2412 if (!vn_min_rate)
34f80b04
EG
2413 vn_min_rate = DEF_MIN_RATE;
2414 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2416 }
8a1c38d1 2417 DP(NETIF_MSG_IFUP,
b015e3d1 2418 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2419 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2420
2421 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2423
2424 /* global vn counter - maximal Mbps for this vn */
2425 m_rs_vn.vn_counter.rate = vn_max_rate;
2426
2427 /* quota - number of bytes transmitted in this period */
2428 m_rs_vn.vn_counter.quota =
2429 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2430
8a1c38d1 2431 if (bp->vn_weight_sum) {
34f80b04
EG
2432 /* credit for each period of the fairness algorithm:
2433 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2434 vn_weight_sum should not be larger than 10000, thus
2435 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2436 than zero */
34f80b04 2437 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2438 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439 (8 * bp->vn_weight_sum))),
2440 (bp->cmng.fair_vars.fair_threshold * 2));
2441 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2442 m_fair_vn.vn_credit_delta);
2443 }
2444
34f80b04
EG
2445 /* Store it to internal memory */
2446 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449 ((u32 *)(&m_rs_vn))[i]);
2450
2451 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_fair_vn))[i]);
2455}
2456
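/*
 * Worked quota example (hypothetical numbers): with vn_max_rate =
 * 2500 Mbps and a 100 usec rate-shaping period, vn_counter.quota =
 * (2500 * 100) / 8 = 31250 bytes per period, the byte budget that
 * holds this vn at its ceiling between timer expirations.
 */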
8a1c38d1 2457
c18487ee
YR
2458/* This function is called upon link interrupt */
2459static void bnx2x_link_attn(struct bnx2x *bp)
2460{
d9e8b185 2461 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2462 /* Make sure that we are synced with the current statistics */
2463 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2464
c18487ee 2465 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2466
bb2a0f7a
YG
2467 if (bp->link_vars.link_up) {
2468
1c06328c 2469 /* dropless flow control */
a18f5128 2470 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2471 int port = BP_PORT(bp);
2472 u32 pause_enabled = 0;
2473
2474 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2475 pause_enabled = 1;
2476
2477 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2478 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2479 pause_enabled);
2480 }
2481
bb2a0f7a
YG
2482 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2483 struct host_port_stats *pstats;
2484
2485 pstats = bnx2x_sp(bp, port_stats);
2486 /* reset old bmac stats */
2487 memset(&(pstats->mac_stx[0]), 0,
2488 sizeof(struct mac_stx));
2489 }
f34d28ea 2490 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2491 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2492 }
2493
d9e8b185
VZ
2494 /* indicate link status only if it actually changed */
2495 if (prev_link_status != bp->link_vars.link_status)
2496 bnx2x_link_report(bp);
34f80b04
EG
2497
2498 if (IS_E1HMF(bp)) {
8a1c38d1 2499 int port = BP_PORT(bp);
34f80b04 2500 int func;
8a1c38d1 2501 int vn;
34f80b04 2502
ab6ad5a4 2503 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2504 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2505 if (vn == BP_E1HVN(bp))
2506 continue;
2507
8a1c38d1 2508 func = ((vn << 1) | port);
34f80b04
EG
2509 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2510 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2511 }
34f80b04 2512
8a1c38d1
EG
2513 if (bp->link_vars.link_up) {
2514 int i;
2515
2516 /* Init rate shaping and fairness contexts */
2517 bnx2x_init_port_minmax(bp);
34f80b04 2518
34f80b04 2519 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2520 bnx2x_init_vn_minmax(bp, 2*vn + port);
2521
2522 /* Store it to internal memory */
2523 for (i = 0;
2524 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2525 REG_WR(bp, BAR_XSTRORM_INTMEM +
2526 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2527 ((u32 *)(&bp->cmng))[i]);
2528 }
34f80b04 2529 }
c18487ee 2530}
a2fbb9ea 2531
c18487ee
YR
2532static void bnx2x__link_status_update(struct bnx2x *bp)
2533{
f34d28ea 2534 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2535 return;
a2fbb9ea 2536
c18487ee 2537 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2538
bb2a0f7a
YG
2539 if (bp->link_vars.link_up)
2540 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2541 else
2542 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2543
2691d51d
EG
2544 bnx2x_calc_vn_weight_sum(bp);
2545
c18487ee
YR
2546 /* indicate link status */
2547 bnx2x_link_report(bp);
a2fbb9ea 2548}
a2fbb9ea 2549
34f80b04
EG
2550static void bnx2x_pmf_update(struct bnx2x *bp)
2551{
2552 int port = BP_PORT(bp);
2553 u32 val;
2554
2555 bp->port.pmf = 1;
2556 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2557
2558 /* enable nig attention */
2559 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2560 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2561 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2562
2563 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2564}
2565
c18487ee 2566/* end of Link */
a2fbb9ea
ET
2567
2568/* slow path */
2569
2570/*
2571 * General service functions
2572 */
2573
2691d51d
EG
2574/* send the MCP a request, block until there is a reply */
2575u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2576{
2577 int func = BP_FUNC(bp);
2578 u32 seq = ++bp->fw_seq;
2579 u32 rc = 0;
2580 u32 cnt = 1;
2581 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2582
c4ff7cbf 2583 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2584 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2585 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2586
2587 do {
2588 /* let the FW do its magic ... */
2589 msleep(delay);
2590
2591 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2592
c4ff7cbf
EG
2593 /* Give the FW up to 5 seconds (500*10ms) */
2594 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2595
2596 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2597 cnt*delay, rc, seq);
2598
2599 /* is this a reply to our command? */
2600 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2601 rc &= FW_MSG_CODE_MASK;
2602 else {
2603 /* FW BUG! */
2604 BNX2X_ERR("FW failed to respond!\n");
2605 bnx2x_fw_dump(bp);
2606 rc = 0;
2607 }
c4ff7cbf 2608 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2609
2610 return rc;
2611}
2612
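/*
 * Usage sketch (matches how the DCC handler below calls this): the
 * caller passes a DRV_MSG_CODE_* command and receives the firmware's
 * FW_MSG_CODE_* reply, or 0 if the sequence number was never echoed
 * back.
 *
 *	if (bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK) == 0)
 *		... the firmware did not respond ...
 */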
e665bfda 2613static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2614static void bnx2x_set_rx_mode(struct net_device *dev);
2615
2616static void bnx2x_e1h_disable(struct bnx2x *bp)
2617{
2618 int port = BP_PORT(bp);
2691d51d
EG
2619
2620 netif_tx_disable(bp->dev);
2691d51d
EG
2621
2622 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2623
2691d51d
EG
2624 netif_carrier_off(bp->dev);
2625}
2626
2627static void bnx2x_e1h_enable(struct bnx2x *bp)
2628{
2629 int port = BP_PORT(bp);
2630
2631 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2632
2691d51d
EG
2633 /* Only the Tx queues need to be re-enabled */
2634 netif_tx_wake_all_queues(bp->dev);
2635
061bc702
EG
2636 /*
2637 * Do not call netif_carrier_on here; it will be called when the
2638 * link state is checked and the link is up
2639 */
2691d51d
EG
2640}
2641
2642static void bnx2x_update_min_max(struct bnx2x *bp)
2643{
2644 int port = BP_PORT(bp);
2645 int vn, i;
2646
2647 /* Init rate shaping and fairness contexts */
2648 bnx2x_init_port_minmax(bp);
2649
2650 bnx2x_calc_vn_weight_sum(bp);
2651
2652 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2653 bnx2x_init_vn_minmax(bp, 2*vn + port);
2654
2655 if (bp->port.pmf) {
2656 int func;
2657
2658 /* Set the attention towards other drivers on the same port */
2659 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2660 if (vn == BP_E1HVN(bp))
2661 continue;
2662
2663 func = ((vn << 1) | port);
2664 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2665 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2666 }
2667
2668 /* Store it to internal memory */
2669 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2670 REG_WR(bp, BAR_XSTRORM_INTMEM +
2671 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2672 ((u32 *)(&bp->cmng))[i]);
2673 }
2674}
2675
2676static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2677{
2691d51d 2678 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2679
2680 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2681
f34d28ea
EG
2682 /*
2683 * This is the only place besides the function initialization
2684 * where bp->flags can change, so it is done without any
2685 * locks
2686 */
2691d51d
EG
2687 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2688 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2689 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2690
2691 bnx2x_e1h_disable(bp);
2692 } else {
2693 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2694 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2695
2696 bnx2x_e1h_enable(bp);
2697 }
2698 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2699 }
2700 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2701
2702 bnx2x_update_min_max(bp);
2703 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2704 }
2705
2706 /* Report results to MCP */
2707 if (dcc_event)
2708 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2709 else
2710 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2711}
2712
28912902
MC
2713/* must be called under the spq lock */
2714static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2715{
2716 struct eth_spe *next_spe = bp->spq_prod_bd;
2717
2718 if (bp->spq_prod_bd == bp->spq_last_bd) {
2719 bp->spq_prod_bd = bp->spq;
2720 bp->spq_prod_idx = 0;
2721 DP(NETIF_MSG_TIMER, "end of spq\n");
2722 } else {
2723 bp->spq_prod_bd++;
2724 bp->spq_prod_idx++;
2725 }
2726 return next_spe;
2727}
2728
2729/* must be called under the spq lock */
2730static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2731{
2732 int func = BP_FUNC(bp);
2733
2734 /* Make sure that BD data is updated before writing the producer */
2735 wmb();
2736
2737 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2738 bp->spq_prod_idx);
2739 mmiowb();
2740}
2741
a2fbb9ea
ET
2742/* the slow path queue is odd since completions arrive on the fastpath ring */
2743static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2744 u32 data_hi, u32 data_lo, int common)
2745{
28912902 2746 struct eth_spe *spe;
a2fbb9ea 2747
a2fbb9ea
ET
2748#ifdef BNX2X_STOP_ON_ERROR
2749 if (unlikely(bp->panic))
2750 return -EIO;
2751#endif
2752
34f80b04 2753 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2754
2755 if (!bp->spq_left) {
2756 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2757 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2758 bnx2x_panic();
2759 return -EBUSY;
2760 }
f1410647 2761
28912902
MC
2762 spe = bnx2x_sp_get_next(bp);
2763
a2fbb9ea 2764 /* CID needs the port number to be encoded in it */
28912902 2765 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2766 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2767 HW_CID(bp, cid));
28912902 2768 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2769 if (common)
28912902 2770 spe->hdr.type |=
a2fbb9ea
ET
2771 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2772
28912902
MC
2773 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2774 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2775
2776 bp->spq_left--;
2777
cdaa7cb8
VZ
2778 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2779 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2780 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2781 (u32)(U64_LO(bp->spq_mapping) +
2782 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2783 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2784
28912902 2785 bnx2x_sp_prod_update(bp);
34f80b04 2786 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2787 return 0;
2788}
2789
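/*
 * Usage sketch for bnx2x_sp_post() (hedged example; the cid value is
 * hypothetical): post a "common" ramrod with no data payload and pick
 * up the completion later on the fastpath ring.
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, cid, 0, 0, 1);
 *	if (rc)
 *		... the SPQ was full (-EBUSY) or -EIO was returned
 *		    under BNX2X_STOP_ON_ERROR ...
 */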
2790/* acquire split MCP access lock register */
4a37fb66 2791static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2792{
72fd0718 2793 u32 j, val;
34f80b04 2794 int rc = 0;
a2fbb9ea
ET
2795
2796 might_sleep();
72fd0718 2797 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2798 val = (1UL << 31);
2799 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2800 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2801 if (val & (1L << 31))
2802 break;
2803
2804 msleep(5);
2805 }
a2fbb9ea 2806 if (!(val & (1L << 31))) {
19680c48 2807 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2808 rc = -EBUSY;
2809 }
2810
2811 return rc;
2812}
2813
4a37fb66
YG
2814/* release split MCP access lock register */
2815static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2816{
72fd0718 2817 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2818}
2819
2820static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2821{
2822 struct host_def_status_block *def_sb = bp->def_status_blk;
2823 u16 rc = 0;
2824
2825 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2826 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2827 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2828 rc |= 1;
2829 }
2830 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2831 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2832 rc |= 2;
2833 }
2834 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2835 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2836 rc |= 4;
2837 }
2838 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2839 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2840 rc |= 8;
2841 }
2842 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2843 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2844 rc |= 16;
2845 }
2846 return rc;
2847}
2848
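/*
 * Illustrative reading of the return value above: each bit flags one
 * default-status-block index that moved since the last pass. For
 * example, rc = 0x5 (1 | 4) means both the attention index and the
 * USTORM index advanced and need servicing.
 */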
2849/*
2850 * slow path service functions
2851 */
2852
2853static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2854{
34f80b04 2855 int port = BP_PORT(bp);
5c862848
EG
2856 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2857 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2858 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2859 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2860 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2861 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2862 u32 aeu_mask;
87942b46 2863 u32 nig_mask = 0;
a2fbb9ea 2864
a2fbb9ea
ET
2865 if (bp->attn_state & asserted)
2866 BNX2X_ERR("IGU ERROR\n");
2867
3fcaf2e5
EG
2868 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2869 aeu_mask = REG_RD(bp, aeu_addr);
2870
a2fbb9ea 2871 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2872 aeu_mask, asserted);
72fd0718 2873 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2874 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2875
3fcaf2e5
EG
2876 REG_WR(bp, aeu_addr, aeu_mask);
2877 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2878
3fcaf2e5 2879 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2880 bp->attn_state |= asserted;
3fcaf2e5 2881 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2882
2883 if (asserted & ATTN_HARD_WIRED_MASK) {
2884 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2885
a5e9a7cf
EG
2886 bnx2x_acquire_phy_lock(bp);
2887
877e9aa4 2888 /* save nig interrupt mask */
87942b46 2889 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2890 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2891
c18487ee 2892 bnx2x_link_attn(bp);
a2fbb9ea
ET
2893
2894 /* handle unicore attn? */
2895 }
2896 if (asserted & ATTN_SW_TIMER_4_FUNC)
2897 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2898
2899 if (asserted & GPIO_2_FUNC)
2900 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2901
2902 if (asserted & GPIO_3_FUNC)
2903 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2904
2905 if (asserted & GPIO_4_FUNC)
2906 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2907
2908 if (port == 0) {
2909 if (asserted & ATTN_GENERAL_ATTN_1) {
2910 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2911 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2912 }
2913 if (asserted & ATTN_GENERAL_ATTN_2) {
2914 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2915 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2916 }
2917 if (asserted & ATTN_GENERAL_ATTN_3) {
2918 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2919 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2920 }
2921 } else {
2922 if (asserted & ATTN_GENERAL_ATTN_4) {
2923 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2924 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2925 }
2926 if (asserted & ATTN_GENERAL_ATTN_5) {
2927 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2928 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2929 }
2930 if (asserted & ATTN_GENERAL_ATTN_6) {
2931 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2932 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2933 }
2934 }
2935
2936 } /* if hardwired */
2937
5c862848
EG
2938 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2939 asserted, hc_addr);
2940 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2941
2942 /* now set back the mask */
a5e9a7cf 2943 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2944 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2945 bnx2x_release_phy_lock(bp);
2946 }
a2fbb9ea
ET
2947}
2948
fd4ef40d
EG
2949static inline void bnx2x_fan_failure(struct bnx2x *bp)
2950{
2951 int port = BP_PORT(bp);
2952
2953 /* mark the failure */
2954 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2955 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2956 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2957 bp->link_params.ext_phy_config);
2958
2959 /* log the failure */
cdaa7cb8
VZ
2960 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2961 " the driver to shutdown the card to prevent permanent"
2962 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2963}
ab6ad5a4 2964
877e9aa4 2965static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2966{
34f80b04 2967 int port = BP_PORT(bp);
877e9aa4 2968 int reg_offset;
4d295db0 2969 u32 val, swap_val, swap_override;
877e9aa4 2970
34f80b04
EG
2971 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2972 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2973
34f80b04 2974 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2975
2976 val = REG_RD(bp, reg_offset);
2977 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2978 REG_WR(bp, reg_offset, val);
2979
2980 BNX2X_ERR("SPIO5 hw attention\n");
2981
fd4ef40d 2982 /* Fan failure attention */
35b19ba5
EG
2983 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2984 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2985 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2986 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2987 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2988 /* The PHY reset is controlled by GPIO 1 */
2989 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2990 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2991 break;
2992
4d295db0
EG
2993 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2994 /* The PHY reset is controlled by GPIO 1 */
2995 /* fake the port number to cancel the swap done in
2996 set_gpio() */
2997 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2998 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2999 port = (swap_val && swap_override) ^ 1;
3000 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3001 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3002 break;
3003
877e9aa4
ET
3004 default:
3005 break;
3006 }
fd4ef40d 3007 bnx2x_fan_failure(bp);
877e9aa4 3008 }
34f80b04 3009
589abe3a
EG
3010 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3011 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3012 bnx2x_acquire_phy_lock(bp);
3013 bnx2x_handle_module_detect_int(&bp->link_params);
3014 bnx2x_release_phy_lock(bp);
3015 }
3016
34f80b04
EG
3017 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3018
3019 val = REG_RD(bp, reg_offset);
3020 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3021 REG_WR(bp, reg_offset, val);
3022
3023 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3024 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3025 bnx2x_panic();
3026 }
877e9aa4
ET
3027}
3028
3029static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3030{
3031 u32 val;
3032
0626b899 3033 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3034
3035 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3036 BNX2X_ERR("DB hw attention 0x%x\n", val);
3037 /* DORQ discard attention */
3038 if (val & 0x2)
3039 BNX2X_ERR("FATAL error from DORQ\n");
3040 }
34f80b04
EG
3041
3042 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3043
3044 int port = BP_PORT(bp);
3045 int reg_offset;
3046
3047 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3048 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3049
3050 val = REG_RD(bp, reg_offset);
3051 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3052 REG_WR(bp, reg_offset, val);
3053
3054 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3055 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3056 bnx2x_panic();
3057 }
877e9aa4
ET
3058}
3059
3060static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3061{
3062 u32 val;
3063
3064 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3065
3066 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3067 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3068 /* CFC error attention */
3069 if (val & 0x2)
3070 BNX2X_ERR("FATAL error from CFC\n");
3071 }
3072
3073 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3074
3075 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3076 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3077 /* RQ_USDMDP_FIFO_OVERFLOW */
3078 if (val & 0x18000)
3079 BNX2X_ERR("FATAL error from PXP\n");
3080 }
34f80b04
EG
3081
3082 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3083
3084 int port = BP_PORT(bp);
3085 int reg_offset;
3086
3087 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3088 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3089
3090 val = REG_RD(bp, reg_offset);
3091 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3092 REG_WR(bp, reg_offset, val);
3093
3094 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3095 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3096 bnx2x_panic();
3097 }
877e9aa4
ET
3098}
3099
3100static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3101{
34f80b04
EG
3102 u32 val;
3103
877e9aa4
ET
3104 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3105
34f80b04
EG
3106 if (attn & BNX2X_PMF_LINK_ASSERT) {
3107 int func = BP_FUNC(bp);
3108
3109 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3110 bp->mf_config = SHMEM_RD(bp,
3111 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3112 val = SHMEM_RD(bp, func_mb[func].drv_status);
3113 if (val & DRV_STATUS_DCC_EVENT_MASK)
3114 bnx2x_dcc_event(bp,
3115 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3116 bnx2x__link_status_update(bp);
2691d51d 3117 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3118 bnx2x_pmf_update(bp);
3119
3120 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3121
3122 BNX2X_ERR("MC assert!\n");
3123 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3125 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3126 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3127 bnx2x_panic();
3128
3129 } else if (attn & BNX2X_MCP_ASSERT) {
3130
3131 BNX2X_ERR("MCP assert!\n");
3132 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3133 bnx2x_fw_dump(bp);
877e9aa4
ET
3134
3135 } else
3136 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3137 }
3138
3139 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3140 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3141 if (attn & BNX2X_GRC_TIMEOUT) {
3142 val = CHIP_IS_E1H(bp) ?
3143 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3144 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3145 }
3146 if (attn & BNX2X_GRC_RSV) {
3147 val = CHIP_IS_E1H(bp) ?
3148 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3149 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3150 }
877e9aa4 3151 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3152 }
3153}
3154
72fd0718
VZ
3155static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3156static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3157
3158
3159#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3160#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3161#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3162#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3163#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3164#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3165/*
3166 * should be run under rtnl lock
3167 */
3168static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3169{
3170 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3171 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3172 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3173 barrier();
3174 mmiowb();
3175}
3176
3177/*
3178 * should be run under rtnl lock
3179 */
3180static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3181{
3182 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3183 val |= (1 << 16);
3184 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3185 barrier();
3186 mmiowb();
3187}
3188
3189/*
3190 * should be run under rtnl lock
3191 */
3192static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3193{
3194 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3195 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3196 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3197}
3198
3199/*
3200 * should be run under rtnl lock
3201 */
3202static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3203{
3204 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3205
3206 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3207
3208 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3209 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3210 barrier();
3211 mmiowb();
3212}
3213
3214/*
3215 * should be run under rtnl lock
3216 */
3217static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3218{
3219 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3220
3221 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3222
3223 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3224 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3225 barrier();
3226 mmiowb();
3227
3228 return val1;
3229}
3230
3231/*
3232 * should be run under rtnl lock
3233 */
3234static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3235{
3236 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3237}
3238
3239static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3240{
3241 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3242 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3243}
3244
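/*
 * Illustrative layout of BNX2X_MISC_GEN_REG as used by the helpers
 * above: bits [15:0] (LOAD_COUNTER_MASK) count loaded driver
 * instances, while the bits above them (RESET_DONE_FLAG_MASK, set via
 * bit RESET_DONE_FLAG_SHIFT) flag a reset in progress. A hypothetical
 * raw value of 0x00010002 would read as "reset in progress, two
 * instances loaded".
 */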
3245static inline void _print_next_block(int idx, const char *blk)
3246{
3247 if (idx)
3248 pr_cont(", ");
3249 pr_cont("%s", blk);
3250}
3251
3252static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3253{
3254 int i = 0;
3255 u32 cur_bit = 0;
3256 for (i = 0; sig; i++) {
3257 cur_bit = ((u32)0x1 << i);
3258 if (sig & cur_bit) {
3259 switch (cur_bit) {
3260 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3261 _print_next_block(par_num++, "BRB");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3264 _print_next_block(par_num++, "PARSER");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3267 _print_next_block(par_num++, "TSDM");
3268 break;
3269 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3270 _print_next_block(par_num++, "SEARCHER");
3271 break;
3272 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3273 _print_next_block(par_num++, "TSEMI");
3274 break;
3275 }
3276
3277 /* Clear the bit */
3278 sig &= ~cur_bit;
3279 }
3280 }
3281
3282 return par_num;
3283}
3284
3285static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3286{
3287 int i = 0;
3288 u32 cur_bit = 0;
3289 for (i = 0; sig; i++) {
3290 cur_bit = ((u32)0x1 << i);
3291 if (sig & cur_bit) {
3292 switch (cur_bit) {
3293 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3294 _print_next_block(par_num++, "PBCLIENT");
3295 break;
3296 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3297 _print_next_block(par_num++, "QM");
3298 break;
3299 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3300 _print_next_block(par_num++, "XSDM");
3301 break;
3302 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3303 _print_next_block(par_num++, "XSEMI");
3304 break;
3305 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3306 _print_next_block(par_num++, "DOORBELLQ");
3307 break;
3308 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3309 _print_next_block(par_num++, "VAUX PCI CORE");
3310 break;
3311 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3312 _print_next_block(par_num++, "DEBUG");
3313 break;
3314 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3315 _print_next_block(par_num++, "USDM");
3316 break;
3317 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3318 _print_next_block(par_num++, "USEMI");
3319 break;
3320 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3321 _print_next_block(par_num++, "UPB");
3322 break;
3323 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3324 _print_next_block(par_num++, "CSDM");
3325 break;
3326 }
3327
3328 /* Clear the bit */
3329 sig &= ~cur_bit;
3330 }
3331 }
3332
3333 return par_num;
3334}
3335
3336static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3337{
3338 int i = 0;
3339 u32 cur_bit = 0;
3340 for (i = 0; sig; i++) {
3341 cur_bit = ((u32)0x1 << i);
3342 if (sig & cur_bit) {
3343 switch (cur_bit) {
3344 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3345 _print_next_block(par_num++, "CSEMI");
3346 break;
3347 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3348 _print_next_block(par_num++, "PXP");
3349 break;
3350 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3351 _print_next_block(par_num++,
3352 "PXPPCICLOCKCLIENT");
3353 break;
3354 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3355 _print_next_block(par_num++, "CFC");
3356 break;
3357 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3358 _print_next_block(par_num++, "CDU");
3359 break;
3360 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3361 _print_next_block(par_num++, "IGU");
3362 break;
3363 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3364 _print_next_block(par_num++, "MISC");
3365 break;
3366 }
3367
3368 /* Clear the bit */
3369 sig &= ~cur_bit;
3370 }
3371 }
3372
3373 return par_num;
3374}
3375
3376static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3377{
3378 int i = 0;
3379 u32 cur_bit = 0;
3380 for (i = 0; sig; i++) {
3381 cur_bit = ((u32)0x1 << i);
3382 if (sig & cur_bit) {
3383 switch (cur_bit) {
3384 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3385 _print_next_block(par_num++, "MCP ROM");
3386 break;
3387 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3388 _print_next_block(par_num++, "MCP UMP RX");
3389 break;
3390 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3391 _print_next_block(par_num++, "MCP UMP TX");
3392 break;
3393 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3394 _print_next_block(par_num++, "MCP SCPAD");
3395 break;
3396 }
3397
3398 /* Clear the bit */
3399 sig &= ~cur_bit;
3400 }
3401 }
3402
3403 return par_num;
3404}
3405
3406static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3407 u32 sig2, u32 sig3)
3408{
3409 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3410 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3411 int par_num = 0;
3412 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3413 "[0]:0x%08x [1]:0x%08x "
3414 "[2]:0x%08x [3]:0x%08x\n",
3415 sig0 & HW_PRTY_ASSERT_SET_0,
3416 sig1 & HW_PRTY_ASSERT_SET_1,
3417 sig2 & HW_PRTY_ASSERT_SET_2,
3418 sig3 & HW_PRTY_ASSERT_SET_3);
3419 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3420 bp->dev->name);
3421 par_num = bnx2x_print_blocks_with_parity0(
3422 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3423 par_num = bnx2x_print_blocks_with_parity1(
3424 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3425 par_num = bnx2x_print_blocks_with_parity2(
3426 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3427 par_num = bnx2x_print_blocks_with_parity3(
3428 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3429 printk("\n");
3430 return true;
3431 } else
3432 return false;
3433}
3434
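/*
 * Example of the output produced by the helpers above (hypothetical
 * parity state and device name): with only the BRB and QM bits set,
 * the driver would print
 * "eth0: Parity errors detected in blocks: BRB, QM"; par_num makes
 * _print_next_block() emit ", " before every block name except the
 * first.
 */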
3435static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3436{
a2fbb9ea 3437 struct attn_route attn;
72fd0718
VZ
3438 int port = BP_PORT(bp);
3439
3440 attn.sig[0] = REG_RD(bp,
3441 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3442 port*4);
3443 attn.sig[1] = REG_RD(bp,
3444 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3445 port*4);
3446 attn.sig[2] = REG_RD(bp,
3447 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3448 port*4);
3449 attn.sig[3] = REG_RD(bp,
3450 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3451 port*4);
3452
3453 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3454 attn.sig[3]);
3455}
3456
3457static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3458{
3459 struct attn_route attn, *group_mask;
34f80b04 3460 int port = BP_PORT(bp);
877e9aa4 3461 int index;
3462 u32 reg_addr;
3463 u32 val;
3fcaf2e5 3464 u32 aeu_mask;
3465
 3466 /* need to take the HW lock because the MCP or the other port might
 3467 also try to handle this event */
4a37fb66 3468 bnx2x_acquire_alr(bp);
a2fbb9ea 3469
3470 if (bnx2x_chk_parity_attn(bp)) {
3471 bp->recovery_state = BNX2X_RECOVERY_INIT;
3472 bnx2x_set_reset_in_progress(bp);
3473 schedule_delayed_work(&bp->reset_task, 0);
3474 /* Disable HW interrupts */
3475 bnx2x_int_disable(bp);
3476 bnx2x_release_alr(bp);
 3477 /* In case of parity errors, don't handle attentions so that
 3478 * the other function can also "see" the parity errors.
 3479 */
3480 return;
3481 }
3482
3483 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3484 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3485 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3486 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3487 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3488 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3489
3490 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3491 if (deasserted & (1 << index)) {
72fd0718 3492 group_mask = &bp->attn_group[index];
a2fbb9ea 3493
34f80b04 3494 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3495 index, group_mask->sig[0], group_mask->sig[1],
3496 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 3497
877e9aa4 3498 bnx2x_attn_int_deasserted3(bp,
72fd0718 3499 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3500 bnx2x_attn_int_deasserted1(bp,
72fd0718 3501 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3502 bnx2x_attn_int_deasserted2(bp,
72fd0718 3503 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3504 bnx2x_attn_int_deasserted0(bp,
72fd0718 3505 attn.sig[0] & group_mask->sig[0]);
3506 }
3507 }
3508
4a37fb66 3509 bnx2x_release_alr(bp);
a2fbb9ea 3510
5c862848 3511 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3512
3513 val = ~deasserted;
3514 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3515 val, reg_addr);
5c862848 3516 REG_WR(bp, reg_addr, val);
a2fbb9ea 3517
a2fbb9ea 3518 if (~bp->attn_state & deasserted)
3fcaf2e5 3519 BNX2X_ERR("IGU ERROR\n");
3520
3521 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3522 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3523
3524 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3525 aeu_mask = REG_RD(bp, reg_addr);
3526
3527 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3528 aeu_mask, deasserted);
72fd0718 3529 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3530 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3531
3532 REG_WR(bp, reg_addr, aeu_mask);
3533 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3534
3535 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3536 bp->attn_state &= ~deasserted;
3537 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3538}
3539
3540static void bnx2x_attn_int(struct bnx2x *bp)
3541{
3542 /* read local copy of bits */
3543 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3544 attn_bits);
3545 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3546 attn_bits_ack);
3547 u32 attn_state = bp->attn_state;
3548
3549 /* look for changed bits */
3550 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3551 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3552
3553 DP(NETIF_MSG_HW,
3554 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3555 attn_bits, attn_ack, asserted, deasserted);
3556
3557 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3558 BNX2X_ERR("BAD attention state\n");
3559
3560 /* handle bits that were raised */
3561 if (asserted)
3562 bnx2x_attn_int_asserted(bp, asserted);
3563
3564 if (deasserted)
3565 bnx2x_attn_int_deasserted(bp, deasserted);
3566}
3567
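/*
 * Worked example of the assert/deassert arithmetic in bnx2x_attn_int()
 * (bit patterns below are illustrative only). With attn_bits = 0101b
 * (current HW lines), attn_ack = 0011b and attn_state = 0011b (the
 * driver's view):
 *
 *	asserted   = attn_bits & ~attn_ack & ~attn_state = 0100b
 *	deasserted = ~attn_bits & attn_ack &  attn_state = 0010b
 *
 * so bit 2 was newly raised and bit 1 was newly dropped. The "BAD
 * attention state" check fires only for a bit on which attn_bits and
 * attn_ack agree while attn_state disagrees.
 */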
3568static void bnx2x_sp_task(struct work_struct *work)
3569{
1cf167f2 3570 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3571 u16 status;
3572
3573 /* Return here if interrupt is disabled */
3574 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3575 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3576 return;
3577 }
3578
3579 status = bnx2x_update_dsb_idx(bp);
3580/* if (status == 0) */
3581/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3582
cdaa7cb8 3583 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3584
877e9aa4 3585 /* HW attentions */
cdaa7cb8 3586 if (status & 0x1) {
a2fbb9ea 3587 bnx2x_attn_int(bp);
3588 status &= ~0x1;
3589 }
3590
3591 /* CStorm events: STAT_QUERY */
3592 if (status & 0x2) {
3593 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3594 status &= ~0x2;
3595 }
3596
3597 if (unlikely(status))
3598 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3599 status);
a2fbb9ea 3600
68d59484 3601 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3602 IGU_INT_NOP, 1);
3603 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3604 IGU_INT_NOP, 1);
3605 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3606 IGU_INT_NOP, 1);
3607 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3608 IGU_INT_NOP, 1);
3609 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3610 IGU_INT_ENABLE, 1);
3611}
3612
3613static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3614{
3615 struct net_device *dev = dev_instance;
3616 struct bnx2x *bp = netdev_priv(dev);
3617
3618 /* Return here if interrupt is disabled */
3619 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3620 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3621 return IRQ_HANDLED;
3622 }
3623
8d9c5f34 3624 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3625
3626#ifdef BNX2X_STOP_ON_ERROR
3627 if (unlikely(bp->panic))
3628 return IRQ_HANDLED;
3629#endif
3630
3631#ifdef BCM_CNIC
3632 {
3633 struct cnic_ops *c_ops;
3634
3635 rcu_read_lock();
3636 c_ops = rcu_dereference(bp->cnic_ops);
3637 if (c_ops)
3638 c_ops->cnic_handler(bp->cnic_data, NULL);
3639 rcu_read_unlock();
3640 }
3641#endif
1cf167f2 3642 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3643
3644 return IRQ_HANDLED;
3645}
3646
3647/* end of slow path */
3648
3649/* Statistics */
3650
3651/****************************************************************************
3652* Macros
3653****************************************************************************/
3654
3655/* sum[hi:lo] += add[hi:lo] */
3656#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3657 do { \
3658 s_lo += a_lo; \
f5ba6772 3659 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3660 } while (0)
3661
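/*
 * ADD_64 detects a carry out of the low dword purely by unsigned
 * wrap-around: after "s_lo += a_lo", s_lo can only be smaller than a_lo
 * if the addition wrapped. Worked example:
 *
 *	u32 s_hi = 0x1, s_lo = 0xffffffff;
 *	ADD_64(s_hi, 0, s_lo, 1);
 *	// s_lo wraps to 0x0, (s_lo < a_lo) supplies the carry and
 *	// s_hi becomes 0x2: 0x1ffffffff + 1 == 0x200000000
 */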
3662/* difference = minuend - subtrahend */
3663#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3664 do { \
3665 if (m_lo < s_lo) { \
3666 /* underflow */ \
a2fbb9ea 3667 d_hi = m_hi - s_hi; \
bb2a0f7a 3668 if (d_hi > 0) { \
6378c025 3669 /* we can 'loan' 1 */ \
3670 d_hi--; \
3671 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3672 } else { \
6378c025 3673 /* m_hi <= s_hi */ \
3674 d_hi = 0; \
3675 d_lo = 0; \
3676 } \
3677 } else { \
3678 /* m_lo >= s_lo */ \
a2fbb9ea 3679 if (m_hi < s_hi) { \
3680 d_hi = 0; \
3681 d_lo = 0; \
3682 } else { \
6378c025 3683 /* m_hi >= s_hi */ \
3684 d_hi = m_hi - s_hi; \
3685 d_lo = m_lo - s_lo; \
3686 } \
3687 } \
3688 } while (0)
3689
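/*
 * DIFF_64 is the matching split subtraction: it borrows from the high
 * dword when the low dwords would underflow, and clamps the result to
 * zero when the subtrahend is the larger value (these counters are only
 * expected to grow between reads). Worked example:
 *
 *	minuend 0x200000000, subtrahend 0x100000001:
 *	m_lo (0x0) < s_lo (0x1) and d_hi = 2 - 1 > 0, so loan 1:
 *	d_hi = 0, d_lo = 0x0 + (UINT_MAX - 0x1) + 1 = 0xffffffff
 */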
bb2a0f7a 3690#define UPDATE_STAT64(s, t) \
a2fbb9ea 3691 do { \
3692 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3693 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3694 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3695 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3696 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3697 pstats->mac_stx[1].t##_lo, diff.lo); \
3698 } while (0)
3699
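/*
 * UPDATE_STAT64 keeps two copies of every MAC counter: mac_stx[0] holds
 * the raw value from the previous DMAE read and is used only to form
 * the delta, while mac_stx[1] is the running 64-bit total. One update
 * step, schematically:
 *
 *	DIFF_64(diff, new->s, mac_stx[0].t);	// delta since last read
 *	mac_stx[0].t = new->s;			// remember this read
 *	ADD_64(mac_stx[1].t, diff);		// accumulate the delta
 */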
bb2a0f7a 3700#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3701 do { \
3702 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3703 diff.lo, new->s##_lo, old->s##_lo); \
3704 ADD_64(estats->t##_hi, diff.hi, \
3705 estats->t##_lo, diff.lo); \
3706 } while (0)
3707
3708/* sum[hi:lo] += add */
3709#define ADD_EXTEND_64(s_hi, s_lo, a) \
3710 do { \
3711 s_lo += a; \
3712 s_hi += (s_lo < a) ? 1 : 0; \
3713 } while (0)
3714
bb2a0f7a 3715#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3716 do { \
3717 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3718 pstats->mac_stx[1].s##_lo, \
3719 new->s); \
3720 } while (0)
3721
bb2a0f7a 3722#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3723 do { \
3724 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3725 old_tclient->s = tclient->s; \
3726 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3727 } while (0)
3728
3729#define UPDATE_EXTEND_USTAT(s, t) \
3730 do { \
3731 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3732 old_uclient->s = uclient->s; \
3733 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3734 } while (0)
3735
3736#define UPDATE_EXTEND_XSTAT(s, t) \
3737 do { \
3738 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3739 old_xclient->s = xclient->s; \
3740 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3741 } while (0)
3742
3743/* minuend -= subtrahend */
3744#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3745 do { \
3746 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3747 } while (0)
3748
3749/* minuend[hi:lo] -= subtrahend */
3750#define SUB_EXTEND_64(m_hi, m_lo, s) \
3751 do { \
3752 SUB_64(m_hi, 0, m_lo, s); \
3753 } while (0)
3754
3755#define SUB_EXTEND_USTAT(s, t) \
3756 do { \
3757 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3758 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3759 } while (0)
3760
3761/*
3762 * General service functions
3763 */
3764
3765static inline long bnx2x_hilo(u32 *hiref)
3766{
3767 u32 lo = *(hiref + 1);
3768#if (BITS_PER_LONG == 64)
3769 u32 hi = *hiref;
3770
3771 return HILO_U64(hi, lo);
3772#else
3773 return lo;
3774#endif
3775}
3776
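/*
 * bnx2x_hilo() folds a {hi, lo} dword pair into a single long. On a
 * 64-bit host the full value is returned (HILO_U64 is presumably
 * ((u64)hi << 32) | lo, see bnx2x.h); on a 32-bit host only the low
 * dword fits in a long, so e.g. hi = 0x1, lo = 0x2 yields 0x100000002
 * on 64-bit and just 0x2 on 32-bit.
 */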
3777/*
3778 * Init service functions
3779 */
3780
3781static void bnx2x_storm_stats_post(struct bnx2x *bp)
3782{
3783 if (!bp->stats_pending) {
3784 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3785 int i, rc;
3786
3787 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3788 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3789 for_each_queue(bp, i)
3790 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3791
3792 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3793 ((u32 *)&ramrod_data)[1],
3794 ((u32 *)&ramrod_data)[0], 0);
3795 if (rc == 0) {
 3796 /* stats ramrod has its own slot on the spq */
3797 bp->spq_left++;
3798 bp->stats_pending = 1;
3799 }
3800 }
3801}
3802
3803static void bnx2x_hw_stats_post(struct bnx2x *bp)
3804{
3805 struct dmae_command *dmae = &bp->stats_dmae;
3806 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3807
3808 *stats_comp = DMAE_COMP_VAL;
3809 if (CHIP_REV_IS_SLOW(bp))
3810 return;
3811
3812 /* loader */
3813 if (bp->executer_idx) {
3814 int loader_idx = PMF_DMAE_C(bp);
3815
3816 memset(dmae, 0, sizeof(struct dmae_command));
3817
3818 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3819 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3820 DMAE_CMD_DST_RESET |
3821#ifdef __BIG_ENDIAN
3822 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3823#else
3824 DMAE_CMD_ENDIANITY_DW_SWAP |
3825#endif
3826 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3827 DMAE_CMD_PORT_0) |
3828 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3830 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3831 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3832 sizeof(struct dmae_command) *
3833 (loader_idx + 1)) >> 2;
3834 dmae->dst_addr_hi = 0;
3835 dmae->len = sizeof(struct dmae_command) >> 2;
3836 if (CHIP_IS_E1(bp))
3837 dmae->len--;
3838 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3839 dmae->comp_addr_hi = 0;
3840 dmae->comp_val = 1;
3841
3842 *stats_comp = 0;
3843 bnx2x_post_dmae(bp, dmae, loader_idx);
3844
3845 } else if (bp->func_stx) {
3846 *stats_comp = 0;
3847 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3848 }
3849}
3850
3851static int bnx2x_stats_comp(struct bnx2x *bp)
3852{
3853 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3854 int cnt = 10;
3855
3856 might_sleep();
3857 while (*stats_comp != DMAE_COMP_VAL) {
3858 if (!cnt) {
3859 BNX2X_ERR("timeout waiting for stats finished\n");
3860 break;
3861 }
3862 cnt--;
12469401 3863 msleep(1);
3864 }
3865 return 1;
3866}
3867
3868/*
3869 * Statistics service functions
3870 */
3871
3872static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3873{
3874 struct dmae_command *dmae;
3875 u32 opcode;
3876 int loader_idx = PMF_DMAE_C(bp);
3877 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3878
3879 /* sanity */
3880 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3881 BNX2X_ERR("BUG!\n");
3882 return;
3883 }
3884
3885 bp->executer_idx = 0;
3886
3887 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3888 DMAE_CMD_C_ENABLE |
3889 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3890#ifdef __BIG_ENDIAN
3891 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3892#else
3893 DMAE_CMD_ENDIANITY_DW_SWAP |
3894#endif
3895 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3896 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3897
3898 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3899 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3900 dmae->src_addr_lo = bp->port.port_stx >> 2;
3901 dmae->src_addr_hi = 0;
3902 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3903 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3904 dmae->len = DMAE_LEN32_RD_MAX;
3905 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3906 dmae->comp_addr_hi = 0;
3907 dmae->comp_val = 1;
3908
3909 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3910 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3911 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3912 dmae->src_addr_hi = 0;
3913 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3914 DMAE_LEN32_RD_MAX * 4);
3915 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3916 DMAE_LEN32_RD_MAX * 4);
3917 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3918 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3919 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3920 dmae->comp_val = DMAE_COMP_VAL;
3921
3922 *stats_comp = 0;
3923 bnx2x_hw_stats_post(bp);
3924 bnx2x_stats_comp(bp);
3925}
3926
3927static void bnx2x_port_stats_init(struct bnx2x *bp)
3928{
3929 struct dmae_command *dmae;
34f80b04 3930 int port = BP_PORT(bp);
bb2a0f7a 3931 int vn = BP_E1HVN(bp);
a2fbb9ea 3932 u32 opcode;
bb2a0f7a 3933 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3934 u32 mac_addr;
3935 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3936
3937 /* sanity */
3938 if (!bp->link_vars.link_up || !bp->port.pmf) {
3939 BNX2X_ERR("BUG!\n");
3940 return;
3941 }
3942
3943 bp->executer_idx = 0;
3944
3945 /* MCP */
3946 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3947 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3948 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3949#ifdef __BIG_ENDIAN
bb2a0f7a 3950 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3951#else
bb2a0f7a 3952 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3953#endif
3954 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3955 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3956
bb2a0f7a 3957 if (bp->port.port_stx) {
3958
3959 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3960 dmae->opcode = opcode;
3961 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3962 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3963 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3964 dmae->dst_addr_hi = 0;
3965 dmae->len = sizeof(struct host_port_stats) >> 2;
3966 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3967 dmae->comp_addr_hi = 0;
3968 dmae->comp_val = 1;
3969 }
3970
3971 if (bp->func_stx) {
3972
3973 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3974 dmae->opcode = opcode;
3975 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3976 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3977 dmae->dst_addr_lo = bp->func_stx >> 2;
3978 dmae->dst_addr_hi = 0;
3979 dmae->len = sizeof(struct host_func_stats) >> 2;
3980 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3981 dmae->comp_addr_hi = 0;
3982 dmae->comp_val = 1;
3983 }
3984
bb2a0f7a 3985 /* MAC */
3986 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3987 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3988 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3989#ifdef __BIG_ENDIAN
3990 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3991#else
3992 DMAE_CMD_ENDIANITY_DW_SWAP |
3993#endif
3994 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3995 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3996
c18487ee 3997 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3998
3999 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4000 NIG_REG_INGRESS_BMAC0_MEM);
4001
4002 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4003 BIGMAC_REGISTER_TX_STAT_GTBYT */
4004 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4005 dmae->opcode = opcode;
4006 dmae->src_addr_lo = (mac_addr +
4007 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4008 dmae->src_addr_hi = 0;
4009 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4010 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4011 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4012 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4013 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4014 dmae->comp_addr_hi = 0;
4015 dmae->comp_val = 1;
4016
4017 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4018 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4019 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4020 dmae->opcode = opcode;
4021 dmae->src_addr_lo = (mac_addr +
4022 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4023 dmae->src_addr_hi = 0;
4024 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4025 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 4026 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4027 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4028 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4029 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4030 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4031 dmae->comp_addr_hi = 0;
4032 dmae->comp_val = 1;
4033
c18487ee 4034 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4035
4036 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4037
4038 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4039 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4040 dmae->opcode = opcode;
4041 dmae->src_addr_lo = (mac_addr +
4042 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4043 dmae->src_addr_hi = 0;
4044 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4045 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4046 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4047 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4048 dmae->comp_addr_hi = 0;
4049 dmae->comp_val = 1;
4050
4051 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4052 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4053 dmae->opcode = opcode;
4054 dmae->src_addr_lo = (mac_addr +
4055 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4056 dmae->src_addr_hi = 0;
4057 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4058 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 4059 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4060 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4061 dmae->len = 1;
4062 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4063 dmae->comp_addr_hi = 0;
4064 dmae->comp_val = 1;
4065
4066 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4067 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4068 dmae->opcode = opcode;
4069 dmae->src_addr_lo = (mac_addr +
4070 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4071 dmae->src_addr_hi = 0;
4072 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4073 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 4074 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4075 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4076 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4077 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078 dmae->comp_addr_hi = 0;
4079 dmae->comp_val = 1;
4080 }
4081
4082 /* NIG */
4083 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4084 dmae->opcode = opcode;
4085 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4086 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4087 dmae->src_addr_hi = 0;
4088 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4089 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4090 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4091 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4092 dmae->comp_addr_hi = 0;
4093 dmae->comp_val = 1;
4094
4095 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4096 dmae->opcode = opcode;
4097 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4098 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4099 dmae->src_addr_hi = 0;
4100 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4101 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4102 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4103 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4104 dmae->len = (2*sizeof(u32)) >> 2;
4105 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4106 dmae->comp_addr_hi = 0;
4107 dmae->comp_val = 1;
4108
4109 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4110 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4111 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4112 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4113#ifdef __BIG_ENDIAN
4114 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4115#else
4116 DMAE_CMD_ENDIANITY_DW_SWAP |
4117#endif
4118 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4119 (vn << DMAE_CMD_E1HVN_SHIFT));
4120 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4121 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 4122 dmae->src_addr_hi = 0;
4123 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4124 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4125 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4126 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4127 dmae->len = (2*sizeof(u32)) >> 2;
4128 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4129 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4130 dmae->comp_val = DMAE_COMP_VAL;
4131
4132 *stats_comp = 0;
4133}
4134
bb2a0f7a 4135static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 4136{
4137 struct dmae_command *dmae = &bp->stats_dmae;
4138 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4139
4140 /* sanity */
4141 if (!bp->func_stx) {
4142 BNX2X_ERR("BUG!\n");
4143 return;
4144 }
a2fbb9ea 4145
4146 bp->executer_idx = 0;
4147 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 4148
4149 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4150 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4151 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4152#ifdef __BIG_ENDIAN
4153 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4154#else
4155 DMAE_CMD_ENDIANITY_DW_SWAP |
4156#endif
4157 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4158 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4159 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4160 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4161 dmae->dst_addr_lo = bp->func_stx >> 2;
4162 dmae->dst_addr_hi = 0;
4163 dmae->len = sizeof(struct host_func_stats) >> 2;
4164 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4165 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4166 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4167
4168 *stats_comp = 0;
4169}
a2fbb9ea 4170
4171static void bnx2x_stats_start(struct bnx2x *bp)
4172{
4173 if (bp->port.pmf)
4174 bnx2x_port_stats_init(bp);
4175
4176 else if (bp->func_stx)
4177 bnx2x_func_stats_init(bp);
4178
4179 bnx2x_hw_stats_post(bp);
4180 bnx2x_storm_stats_post(bp);
4181}
4182
4183static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4184{
4185 bnx2x_stats_comp(bp);
4186 bnx2x_stats_pmf_update(bp);
4187 bnx2x_stats_start(bp);
4188}
4189
4190static void bnx2x_stats_restart(struct bnx2x *bp)
4191{
4192 bnx2x_stats_comp(bp);
4193 bnx2x_stats_start(bp);
4194}
4195
4196static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4197{
4198 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4199 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4200 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4201 struct {
4202 u32 lo;
4203 u32 hi;
4204 } diff;
4205
4206 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4207 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4208 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4209 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4210 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4211 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 4212 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 4213 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 4214 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4215 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4216 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4217 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4218 UPDATE_STAT64(tx_stat_gt127,
4219 tx_stat_etherstatspkts65octetsto127octets);
4220 UPDATE_STAT64(tx_stat_gt255,
4221 tx_stat_etherstatspkts128octetsto255octets);
4222 UPDATE_STAT64(tx_stat_gt511,
4223 tx_stat_etherstatspkts256octetsto511octets);
4224 UPDATE_STAT64(tx_stat_gt1023,
4225 tx_stat_etherstatspkts512octetsto1023octets);
4226 UPDATE_STAT64(tx_stat_gt1518,
4227 tx_stat_etherstatspkts1024octetsto1522octets);
4228 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4229 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4230 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4231 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4232 UPDATE_STAT64(tx_stat_gterr,
4233 tx_stat_dot3statsinternalmactransmiterrors);
4234 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4235
4236 estats->pause_frames_received_hi =
4237 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4238 estats->pause_frames_received_lo =
4239 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4240
4241 estats->pause_frames_sent_hi =
4242 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4243 estats->pause_frames_sent_lo =
4244 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4245}
4246
4247static void bnx2x_emac_stats_update(struct bnx2x *bp)
4248{
4249 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4250 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4251 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4252
4253 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4254 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4255 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4256 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4257 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4258 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4259 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4260 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4261 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4262 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4263 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4264 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4265 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4266 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4267 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4268 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4269 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4270 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4271 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4272 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4273 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4274 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4275 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4276 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4277 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4278 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4279 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4280 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4281 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4282 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4283 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4284
4285 estats->pause_frames_received_hi =
4286 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4287 estats->pause_frames_received_lo =
4288 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4289 ADD_64(estats->pause_frames_received_hi,
4290 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4291 estats->pause_frames_received_lo,
4292 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4293
4294 estats->pause_frames_sent_hi =
4295 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4296 estats->pause_frames_sent_lo =
4297 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4298 ADD_64(estats->pause_frames_sent_hi,
4299 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4300 estats->pause_frames_sent_lo,
4301 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4302}
4303
4304static int bnx2x_hw_stats_update(struct bnx2x *bp)
4305{
4306 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4307 struct nig_stats *old = &(bp->port.old_nig_stats);
4308 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4309 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4310 struct {
4311 u32 lo;
4312 u32 hi;
4313 } diff;
4314
4315 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4316 bnx2x_bmac_stats_update(bp);
4317
4318 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4319 bnx2x_emac_stats_update(bp);
4320
4321 else { /* unreached */
c3eefaf6 4322 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4323 return -1;
4324 }
a2fbb9ea 4325
4326 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4327 new->brb_discard - old->brb_discard);
4328 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4329 new->brb_truncate - old->brb_truncate);
a2fbb9ea 4330
4331 UPDATE_STAT64_NIG(egress_mac_pkt0,
4332 etherstatspkts1024octetsto1522octets);
4333 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 4334
bb2a0f7a 4335 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 4336
4337 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4338 sizeof(struct mac_stx));
4339 estats->brb_drop_hi = pstats->brb_drop_hi;
4340 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 4341
bb2a0f7a 4342 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 4343
4344 if (!BP_NOMCP(bp)) {
4345 u32 nig_timer_max =
4346 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4347 if (nig_timer_max != estats->nig_timer_max) {
4348 estats->nig_timer_max = nig_timer_max;
4349 BNX2X_ERR("NIG timer max (%u)\n",
4350 estats->nig_timer_max);
4351 }
4352 }
4353
bb2a0f7a 4354 return 0;
4355}
4356
bb2a0f7a 4357static int bnx2x_storm_stats_update(struct bnx2x *bp)
4358{
4359 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4360 struct tstorm_per_port_stats *tport =
de832a55 4361 &stats->tstorm_common.port_statistics;
4362 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4363 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4364 int i;
4365
4366 memcpy(&(fstats->total_bytes_received_hi),
4367 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4368 sizeof(struct host_func_stats) - 2*sizeof(u32));
4369 estats->error_bytes_received_hi = 0;
4370 estats->error_bytes_received_lo = 0;
4371 estats->etherstatsoverrsizepkts_hi = 0;
4372 estats->etherstatsoverrsizepkts_lo = 0;
4373 estats->no_buff_discard_hi = 0;
4374 estats->no_buff_discard_lo = 0;
a2fbb9ea 4375
54b9ddaa 4376 for_each_queue(bp, i) {
4377 struct bnx2x_fastpath *fp = &bp->fp[i];
4378 int cl_id = fp->cl_id;
4379 struct tstorm_per_client_stats *tclient =
4380 &stats->tstorm_common.client_statistics[cl_id];
4381 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4382 struct ustorm_per_client_stats *uclient =
4383 &stats->ustorm_common.client_statistics[cl_id];
4384 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4385 struct xstorm_per_client_stats *xclient =
4386 &stats->xstorm_common.client_statistics[cl_id];
4387 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4388 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4389 u32 diff;
4390
4391 /* are storm stats valid? */
4392 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4393 bp->stats_counter) {
de832a55 4394 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
cdaa7cb8 4395 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4396 i, xclient->stats_counter, bp->stats_counter);
4397 return -1;
4398 }
4399 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4400 bp->stats_counter) {
de832a55 4401 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
cdaa7cb8 4402 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4403 i, tclient->stats_counter, bp->stats_counter);
4404 return -2;
4405 }
4406 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4407 bp->stats_counter) {
4408 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
cdaa7cb8 4409 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4410 i, uclient->stats_counter, bp->stats_counter);
4411 return -4;
4412 }
a2fbb9ea 4413
de832a55 4414 qstats->total_bytes_received_hi =
ca00392c 4415 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4416 qstats->total_bytes_received_lo =
4417 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4418
4419 ADD_64(qstats->total_bytes_received_hi,
4420 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4421 qstats->total_bytes_received_lo,
4422 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4423
4424 ADD_64(qstats->total_bytes_received_hi,
4425 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4426 qstats->total_bytes_received_lo,
4427 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4428
4429 SUB_64(qstats->total_bytes_received_hi,
4430 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4431 qstats->total_bytes_received_lo,
4432 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4433
4434 SUB_64(qstats->total_bytes_received_hi,
4435 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4436 qstats->total_bytes_received_lo,
4437 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4438
4439 SUB_64(qstats->total_bytes_received_hi,
4440 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4441 qstats->total_bytes_received_lo,
4442 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4443
4444 qstats->valid_bytes_received_hi =
4445 qstats->total_bytes_received_hi;
de832a55 4446 qstats->valid_bytes_received_lo =
ca00392c 4447 qstats->total_bytes_received_lo;
bb2a0f7a 4448
de832a55 4449 qstats->error_bytes_received_hi =
bb2a0f7a 4450 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4451 qstats->error_bytes_received_lo =
bb2a0f7a 4452 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4453
4454 ADD_64(qstats->total_bytes_received_hi,
4455 qstats->error_bytes_received_hi,
4456 qstats->total_bytes_received_lo,
4457 qstats->error_bytes_received_lo);
4458
4459 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4460 total_unicast_packets_received);
4461 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4462 total_multicast_packets_received);
4463 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4464 total_broadcast_packets_received);
4465 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4466 etherstatsoverrsizepkts);
4467 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4468
4469 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4470 total_unicast_packets_received);
4471 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4472 total_multicast_packets_received);
4473 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4474 total_broadcast_packets_received);
4475 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4476 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4477 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4478
4479 qstats->total_bytes_transmitted_hi =
ca00392c 4480 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4481 qstats->total_bytes_transmitted_lo =
4482 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4483
4484 ADD_64(qstats->total_bytes_transmitted_hi,
4485 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4486 qstats->total_bytes_transmitted_lo,
4487 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4488
4489 ADD_64(qstats->total_bytes_transmitted_hi,
4490 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4491 qstats->total_bytes_transmitted_lo,
4492 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4493
4494 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4495 total_unicast_packets_transmitted);
4496 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4497 total_multicast_packets_transmitted);
4498 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4499 total_broadcast_packets_transmitted);
4500
4501 old_tclient->checksum_discard = tclient->checksum_discard;
4502 old_tclient->ttl0_discard = tclient->ttl0_discard;
4503
4504 ADD_64(fstats->total_bytes_received_hi,
4505 qstats->total_bytes_received_hi,
4506 fstats->total_bytes_received_lo,
4507 qstats->total_bytes_received_lo);
4508 ADD_64(fstats->total_bytes_transmitted_hi,
4509 qstats->total_bytes_transmitted_hi,
4510 fstats->total_bytes_transmitted_lo,
4511 qstats->total_bytes_transmitted_lo);
4512 ADD_64(fstats->total_unicast_packets_received_hi,
4513 qstats->total_unicast_packets_received_hi,
4514 fstats->total_unicast_packets_received_lo,
4515 qstats->total_unicast_packets_received_lo);
4516 ADD_64(fstats->total_multicast_packets_received_hi,
4517 qstats->total_multicast_packets_received_hi,
4518 fstats->total_multicast_packets_received_lo,
4519 qstats->total_multicast_packets_received_lo);
4520 ADD_64(fstats->total_broadcast_packets_received_hi,
4521 qstats->total_broadcast_packets_received_hi,
4522 fstats->total_broadcast_packets_received_lo,
4523 qstats->total_broadcast_packets_received_lo);
4524 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4525 qstats->total_unicast_packets_transmitted_hi,
4526 fstats->total_unicast_packets_transmitted_lo,
4527 qstats->total_unicast_packets_transmitted_lo);
4528 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4529 qstats->total_multicast_packets_transmitted_hi,
4530 fstats->total_multicast_packets_transmitted_lo,
4531 qstats->total_multicast_packets_transmitted_lo);
4532 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4533 qstats->total_broadcast_packets_transmitted_hi,
4534 fstats->total_broadcast_packets_transmitted_lo,
4535 qstats->total_broadcast_packets_transmitted_lo);
4536 ADD_64(fstats->valid_bytes_received_hi,
4537 qstats->valid_bytes_received_hi,
4538 fstats->valid_bytes_received_lo,
4539 qstats->valid_bytes_received_lo);
4540
4541 ADD_64(estats->error_bytes_received_hi,
4542 qstats->error_bytes_received_hi,
4543 estats->error_bytes_received_lo,
4544 qstats->error_bytes_received_lo);
4545 ADD_64(estats->etherstatsoverrsizepkts_hi,
4546 qstats->etherstatsoverrsizepkts_hi,
4547 estats->etherstatsoverrsizepkts_lo,
4548 qstats->etherstatsoverrsizepkts_lo);
4549 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4550 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4551 }
4552
4553 ADD_64(fstats->total_bytes_received_hi,
4554 estats->rx_stat_ifhcinbadoctets_hi,
4555 fstats->total_bytes_received_lo,
4556 estats->rx_stat_ifhcinbadoctets_lo);
4557
4558 memcpy(estats, &(fstats->total_bytes_received_hi),
4559 sizeof(struct host_func_stats) - 2*sizeof(u32));
4560
4561 ADD_64(estats->etherstatsoverrsizepkts_hi,
4562 estats->rx_stat_dot3statsframestoolong_hi,
4563 estats->etherstatsoverrsizepkts_lo,
4564 estats->rx_stat_dot3statsframestoolong_lo);
4565 ADD_64(estats->error_bytes_received_hi,
4566 estats->rx_stat_ifhcinbadoctets_hi,
4567 estats->error_bytes_received_lo,
4568 estats->rx_stat_ifhcinbadoctets_lo);
4569
4570 if (bp->port.pmf) {
4571 estats->mac_filter_discard =
4572 le32_to_cpu(tport->mac_filter_discard);
4573 estats->xxoverflow_discard =
4574 le32_to_cpu(tport->xxoverflow_discard);
4575 estats->brb_truncate_discard =
bb2a0f7a 4576 le32_to_cpu(tport->brb_truncate_discard);
4577 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4578 }
4579
4580 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4581
4582 bp->stats_pending = 0;
4583
4584 return 0;
4585}
4586
bb2a0f7a 4587static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4588{
bb2a0f7a 4589 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4590 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4591 int i;
4592
4593 nstats->rx_packets =
4594 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4595 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4596 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4597
4598 nstats->tx_packets =
4599 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4600 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4601 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4602
de832a55 4603 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4604
0e39e645 4605 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4606
de832a55 4607 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4608 for_each_queue(bp, i)
4609 nstats->rx_dropped +=
4610 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4611
4612 nstats->tx_dropped = 0;
4613
4614 nstats->multicast =
de832a55 4615 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4616
bb2a0f7a 4617 nstats->collisions =
de832a55 4618 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4619
4620 nstats->rx_length_errors =
4621 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4622 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4623 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4624 bnx2x_hilo(&estats->brb_truncate_hi);
4625 nstats->rx_crc_errors =
4626 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4627 nstats->rx_frame_errors =
4628 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4629 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4630 nstats->rx_missed_errors = estats->xxoverflow_discard;
4631
4632 nstats->rx_errors = nstats->rx_length_errors +
4633 nstats->rx_over_errors +
4634 nstats->rx_crc_errors +
4635 nstats->rx_frame_errors +
4636 nstats->rx_fifo_errors +
4637 nstats->rx_missed_errors;
a2fbb9ea 4638
bb2a0f7a 4639 nstats->tx_aborted_errors =
4640 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4641 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4642 nstats->tx_carrier_errors =
4643 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4644 nstats->tx_fifo_errors = 0;
4645 nstats->tx_heartbeat_errors = 0;
4646 nstats->tx_window_errors = 0;
4647
4648 nstats->tx_errors = nstats->tx_aborted_errors +
4649 nstats->tx_carrier_errors +
4650 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4651}
4652
4653static void bnx2x_drv_stats_update(struct bnx2x *bp)
4654{
4655 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4656 int i;
4657
4658 estats->driver_xoff = 0;
4659 estats->rx_err_discard_pkt = 0;
4660 estats->rx_skb_alloc_failed = 0;
4661 estats->hw_csum_err = 0;
54b9ddaa 4662 for_each_queue(bp, i) {
4663 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4664
4665 estats->driver_xoff += qstats->driver_xoff;
4666 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4667 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4668 estats->hw_csum_err += qstats->hw_csum_err;
4669 }
4670}
4671
bb2a0f7a 4672static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4673{
bb2a0f7a 4674 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4675
4676 if (*stats_comp != DMAE_COMP_VAL)
4677 return;
4678
4679 if (bp->port.pmf)
de832a55 4680 bnx2x_hw_stats_update(bp);
a2fbb9ea 4681
4682 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
 4683 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4684 bnx2x_panic();
4685 return;
4686 }
4687
4688 bnx2x_net_stats_update(bp);
4689 bnx2x_drv_stats_update(bp);
4690
7995c64e 4691 if (netif_msg_timer(bp)) {
bb2a0f7a 4692 struct bnx2x_eth_stats *estats = &bp->eth_stats;
34f80b04 4693 int i;
a2fbb9ea 4694
4695 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4696 bp->dev->name,
de832a55 4697 estats->brb_drop_lo, estats->brb_truncate_lo);
4698
4699 for_each_queue(bp, i) {
4700 struct bnx2x_fastpath *fp = &bp->fp[i];
4701 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4702
4703 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4704 " rx pkt(%lu) rx calls(%lu %lu)\n",
4705 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4706 fp->rx_comp_cons),
4707 le16_to_cpu(*fp->rx_cons_sb),
4708 bnx2x_hilo(&qstats->
4709 total_unicast_packets_received_hi),
4710 fp->rx_calls, fp->rx_pkt);
4711 }
4712
4713 for_each_queue(bp, i) {
4714 struct bnx2x_fastpath *fp = &bp->fp[i];
4715 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4716 struct netdev_queue *txq =
4717 netdev_get_tx_queue(bp->dev, i);
4718
4719 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4720 " tx pkt(%lu) tx calls (%lu)"
4721 " %s (Xoff events %u)\n",
4722 fp->name, bnx2x_tx_avail(fp),
4723 le16_to_cpu(*fp->tx_cons_sb),
4724 bnx2x_hilo(&qstats->
4725 total_unicast_packets_transmitted_hi),
4726 fp->tx_pkt,
4727 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4728 qstats->driver_xoff);
4729 }
4730 }
4731
4732 bnx2x_hw_stats_post(bp);
4733 bnx2x_storm_stats_post(bp);
4734}
a2fbb9ea 4735
4736static void bnx2x_port_stats_stop(struct bnx2x *bp)
4737{
4738 struct dmae_command *dmae;
4739 u32 opcode;
4740 int loader_idx = PMF_DMAE_C(bp);
4741 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4742
bb2a0f7a 4743 bp->executer_idx = 0;
a2fbb9ea 4744
4745 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4746 DMAE_CMD_C_ENABLE |
4747 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4748#ifdef __BIG_ENDIAN
bb2a0f7a 4749 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4750#else
bb2a0f7a 4751 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4752#endif
4753 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4754 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4755
4756 if (bp->port.port_stx) {
4757
4758 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4759 if (bp->func_stx)
4760 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4761 else
4762 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4763 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4764 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4765 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4766 dmae->dst_addr_hi = 0;
4767 dmae->len = sizeof(struct host_port_stats) >> 2;
4768 if (bp->func_stx) {
4769 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4770 dmae->comp_addr_hi = 0;
4771 dmae->comp_val = 1;
4772 } else {
4773 dmae->comp_addr_lo =
4774 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4775 dmae->comp_addr_hi =
4776 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4777 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4778
4779 *stats_comp = 0;
4780 }
4781 }
4782
4783 if (bp->func_stx) {
4784
4785 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4786 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4787 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4788 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4789 dmae->dst_addr_lo = bp->func_stx >> 2;
4790 dmae->dst_addr_hi = 0;
4791 dmae->len = sizeof(struct host_func_stats) >> 2;
4792 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4793 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4794 dmae->comp_val = DMAE_COMP_VAL;
4795
4796 *stats_comp = 0;
a2fbb9ea 4797 }
4798}
4799
4800static void bnx2x_stats_stop(struct bnx2x *bp)
4801{
4802 int update = 0;
4803
4804 bnx2x_stats_comp(bp);
4805
4806 if (bp->port.pmf)
4807 update = (bnx2x_hw_stats_update(bp) == 0);
4808
4809 update |= (bnx2x_storm_stats_update(bp) == 0);
4810
4811 if (update) {
4812 bnx2x_net_stats_update(bp);
a2fbb9ea 4813
4814 if (bp->port.pmf)
4815 bnx2x_port_stats_stop(bp);
4816
4817 bnx2x_hw_stats_post(bp);
4818 bnx2x_stats_comp(bp);
4819 }
4820}
4821
4822static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4823{
4824}
4825
4826static const struct {
4827 void (*action)(struct bnx2x *bp);
4828 enum bnx2x_stats_state next_state;
4829} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4830/* state event */
4831{
4832/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4833/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4834/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4835/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4836},
4837{
4838/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4839/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4840/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4841/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4842}
4843};
4844
4845static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4846{
4847 enum bnx2x_stats_state state = bp->stats_state;
4848
4849 if (unlikely(bp->panic))
4850 return;
4851
4852 bnx2x_stats_stm[state][event].action(bp);
4853 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4854
4855 /* Make sure the state has been "changed" */
4856 smp_wmb();
4857
7995c64e 4858 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4859 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4860 state, event, bp->stats_state);
4861}
4862
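/*
 * bnx2x_stats_stm is a 2x4 state machine: indexed by [current state]
 * [event], each cell names the action to run and the state to move to.
 * For example, STATS_EVENT_STOP while ENABLED runs bnx2x_stats_stop()
 * and lands in DISABLED, while the same event in DISABLED is a no-op:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 *	// ENABLED  -> bnx2x_stats_stop()       -> STATS_STATE_DISABLED
 *	// DISABLED -> bnx2x_stats_do_nothing() -> STATS_STATE_DISABLED
 */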
4863static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4864{
4865 struct dmae_command *dmae;
4866 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4867
4868 /* sanity */
4869 if (!bp->port.pmf || !bp->port.port_stx) {
4870 BNX2X_ERR("BUG!\n");
4871 return;
4872 }
4873
4874 bp->executer_idx = 0;
4875
4876 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4877 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4878 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4879 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4880#ifdef __BIG_ENDIAN
4881 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4882#else
4883 DMAE_CMD_ENDIANITY_DW_SWAP |
4884#endif
4885 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4886 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4887 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4888 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4889 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4890 dmae->dst_addr_hi = 0;
4891 dmae->len = sizeof(struct host_port_stats) >> 2;
4892 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4893 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4894 dmae->comp_val = DMAE_COMP_VAL;
4895
4896 *stats_comp = 0;
4897 bnx2x_hw_stats_post(bp);
4898 bnx2x_stats_comp(bp);
4899}
4900
4901static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4902{
4903 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4904 int port = BP_PORT(bp);
4905 int func;
4906 u32 func_stx;
4907
4908 /* sanity */
4909 if (!bp->port.pmf || !bp->func_stx) {
4910 BNX2X_ERR("BUG!\n");
4911 return;
4912 }
4913
4914 /* save our func_stx */
4915 func_stx = bp->func_stx;
4916
4917 for (vn = VN_0; vn < vn_max; vn++) {
4918 func = 2*vn + port;
4919
4920 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4921 bnx2x_func_stats_init(bp);
4922 bnx2x_hw_stats_post(bp);
4923 bnx2x_stats_comp(bp);
4924 }
4925
4926 /* restore our func_stx */
4927 bp->func_stx = func_stx;
4928}
4929
4930static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4931{
4932 struct dmae_command *dmae = &bp->stats_dmae;
4933 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4934
4935 /* sanity */
4936 if (!bp->func_stx) {
4937 BNX2X_ERR("BUG!\n");
4938 return;
4939 }
4940
4941 bp->executer_idx = 0;
4942 memset(dmae, 0, sizeof(struct dmae_command));
4943
4944 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4945 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4946 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4947#ifdef __BIG_ENDIAN
4948 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4949#else
4950 DMAE_CMD_ENDIANITY_DW_SWAP |
4951#endif
4952 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4953 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4954 dmae->src_addr_lo = bp->func_stx >> 2;
4955 dmae->src_addr_hi = 0;
4956 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4957 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4958 dmae->len = sizeof(struct host_func_stats) >> 2;
4959 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4960 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4961 dmae->comp_val = DMAE_COMP_VAL;
4962
4963 *stats_comp = 0;
4964 bnx2x_hw_stats_post(bp);
4965 bnx2x_stats_comp(bp);
4966}
4967
4968static void bnx2x_stats_init(struct bnx2x *bp)
4969{
4970 int port = BP_PORT(bp);
4971 int func = BP_FUNC(bp);
4972 int i;
4973
4974 bp->stats_pending = 0;
4975 bp->executer_idx = 0;
4976 bp->stats_counter = 0;
4977
4978 /* port and func stats for management */
4979 if (!BP_NOMCP(bp)) {
4980 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4981 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4982
4983 } else {
4984 bp->port.port_stx = 0;
4985 bp->func_stx = 0;
4986 }
4987 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4988 bp->port.port_stx, bp->func_stx);
4989
4990 /* port stats */
4991 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4992 bp->port.old_nig_stats.brb_discard =
4993 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4994 bp->port.old_nig_stats.brb_truncate =
4995 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4996 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4997 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4998 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4999 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5000
5001 /* function stats */
5002 for_each_queue(bp, i) {
5003 struct bnx2x_fastpath *fp = &bp->fp[i];
5004
5005 memset(&fp->old_tclient, 0,
5006 sizeof(struct tstorm_per_client_stats));
5007 memset(&fp->old_uclient, 0,
5008 sizeof(struct ustorm_per_client_stats));
5009 memset(&fp->old_xclient, 0,
5010 sizeof(struct xstorm_per_client_stats));
5011 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5012 }
5013
5014 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5015 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5016
5017 bp->stats_state = STATS_STATE_DISABLED;
5018
5019 if (bp->port.pmf) {
5020 if (bp->port.port_stx)
5021 bnx2x_port_stats_base_init(bp);
5022
5023 if (bp->func_stx)
5024 bnx2x_func_stats_base_init(bp);
5025
5026 } else if (bp->func_stx)
5027 bnx2x_func_stats_base_update(bp);
5028}
5029
5030static void bnx2x_timer(unsigned long data)
5031{
5032 struct bnx2x *bp = (struct bnx2x *) data;
5033
5034 if (!netif_running(bp->dev))
5035 return;
5036
5037 if (atomic_read(&bp->intr_sem) != 0)
f1410647 5038 goto timer_restart;
a2fbb9ea
ET
5039
5040 if (poll) {
5041 struct bnx2x_fastpath *fp = &bp->fp[0];
5042 int rc;
5043
7961f791 5044 bnx2x_tx_int(fp);
a2fbb9ea
ET
5045 rc = bnx2x_rx_int(fp, 1000);
5046 }
5047
34f80b04
EG
5048 if (!BP_NOMCP(bp)) {
5049 int func = BP_FUNC(bp);
a2fbb9ea
ET
5050 u32 drv_pulse;
5051 u32 mcp_pulse;
5052
5053 ++bp->fw_drv_pulse_wr_seq;
5054 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5055 /* TBD - add SYSTEM_TIME */
5056 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 5057 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 5058
34f80b04 5059 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
5060 MCP_PULSE_SEQ_MASK);
5061 /* The delta between driver pulse and mcp response
5062 * should be 1 (before mcp response) or 0 (after mcp response)
5063 */
5064 if ((drv_pulse != mcp_pulse) &&
5065 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5066 /* someone lost a heartbeat... */
5067 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5068 drv_pulse, mcp_pulse);
5069 }
5070 }
5071
f34d28ea 5072 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5073 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5074
f1410647 5075timer_restart:
a2fbb9ea
ET
5076 mod_timer(&bp->timer, jiffies + bp->current_interval);
5077}
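
The heartbeat check above accepts exactly two states: the MCP has already echoed the driver sequence (delta 0) or the echo is still pending (delta 1), with wrap-around handled by the mask. A self-contained sketch, assuming a 16-bit mask in place of DRV_PULSE_SEQ_MASK/MCP_PULSE_SEQ_MASK:

#include <stdio.h>

#define SEQ_MASK 0xffff /* assumed stand-in for the pulse sequence masks */

static int pulse_in_sync(unsigned int drv, unsigned int mcp)
{
	/* delta must be 0 (after MCP response) or 1 (before MCP response) */
	return (drv == mcp) || (drv == ((mcp + 1) & SEQ_MASK));
}

int main(void)
{
	printf("%d %d %d\n",
	       pulse_in_sync(5, 5),           /* 1: in sync */
	       pulse_in_sync(6, 5),           /* 1: driver one ahead */
	       pulse_in_sync(0, SEQ_MASK));   /* 1: sequence wrapped */
	return 0;
}
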
5078
5079/* end of Statistics */
5080
5081/* nic init */
5082
5083/*
5084 * nic init service functions
5085 */
5086
34f80b04 5087static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 5088{
34f80b04
EG
5089 int port = BP_PORT(bp);
5090
ca00392c
EG
5091 /* "CSTORM" */
5092 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5093 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5094 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5095 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5096 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5097 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
5098}
5099
5c862848
EG
5100static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5101 dma_addr_t mapping, int sb_id)
34f80b04
EG
5102{
5103 int port = BP_PORT(bp);
bb2a0f7a 5104 int func = BP_FUNC(bp);
a2fbb9ea 5105 int index;
34f80b04 5106 u64 section;
a2fbb9ea
ET
5107
5108 /* USTORM */
5109 section = ((u64)mapping) + offsetof(struct host_status_block,
5110 u_status_block);
34f80b04 5111 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 5112
ca00392c
EG
5113 REG_WR(bp, BAR_CSTRORM_INTMEM +
5114 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5115 REG_WR(bp, BAR_CSTRORM_INTMEM +
5116 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5117 U64_HI(section));
ca00392c
EG
5118 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5119 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
5120
5121 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
5122 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5123 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
5124
5125 /* CSTORM */
5126 section = ((u64)mapping) + offsetof(struct host_status_block,
5127 c_status_block);
34f80b04 5128 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5129
5130 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5131 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 5132 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5133 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5134 U64_HI(section));
7a9b2557 5135 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 5136 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
5137
5138 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5139 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5140 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
5141
5142 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5143}
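
Each status block section address is a 64-bit DMA address split across two 32-bit register writes. A sketch of that split; the U64_LO/U64_HI definitions here mirror how the driver's macros behave, and the sample address is arbitrary:

#include <stdio.h>
#include <stdint.h>

#define U64_LO(x) ((uint32_t)((x) & 0xffffffffu))
#define U64_HI(x) ((uint32_t)((x) >> 32))

int main(void)
{
	uint64_t section = 0x00000001c0001040ull; /* mapping + offsetof(...) */

	printf("lo = 0x%08x, hi = 0x%08x\n",
	       (unsigned)U64_LO(section), (unsigned)U64_HI(section));
	return 0;
}
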
5144
5145static void bnx2x_zero_def_sb(struct bnx2x *bp)
5146{
5147 int func = BP_FUNC(bp);
a2fbb9ea 5148
ca00392c 5149 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
5150 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5151 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
5152 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5153 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5154 sizeof(struct cstorm_def_status_block_u)/4);
5155 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5156 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5157 sizeof(struct cstorm_def_status_block_c)/4);
5158 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
5159 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5160 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
5161}
5162
5163static void bnx2x_init_def_sb(struct bnx2x *bp,
5164 struct host_def_status_block *def_sb,
34f80b04 5165 dma_addr_t mapping, int sb_id)
a2fbb9ea 5166{
34f80b04
EG
5167 int port = BP_PORT(bp);
5168 int func = BP_FUNC(bp);
a2fbb9ea
ET
5169 int index, val, reg_offset;
5170 u64 section;
5171
5172 /* ATTN */
5173 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5174 atten_status_block);
34f80b04 5175 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 5176
49d66772
ET
5177 bp->attn_state = 0;
5178
a2fbb9ea
ET
5179 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5180 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5181
34f80b04 5182 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
5183 bp->attn_group[index].sig[0] = REG_RD(bp,
5184 reg_offset + 0x10*index);
5185 bp->attn_group[index].sig[1] = REG_RD(bp,
5186 reg_offset + 0x4 + 0x10*index);
5187 bp->attn_group[index].sig[2] = REG_RD(bp,
5188 reg_offset + 0x8 + 0x10*index);
5189 bp->attn_group[index].sig[3] = REG_RD(bp,
5190 reg_offset + 0xc + 0x10*index);
5191 }
5192
a2fbb9ea
ET
5193 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5194 HC_REG_ATTN_MSG0_ADDR_L);
5195
5196 REG_WR(bp, reg_offset, U64_LO(section));
5197 REG_WR(bp, reg_offset + 4, U64_HI(section));
5198
5199 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5200
5201 val = REG_RD(bp, reg_offset);
34f80b04 5202 val |= sb_id;
a2fbb9ea
ET
5203 REG_WR(bp, reg_offset, val);
5204
5205 /* USTORM */
5206 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5207 u_def_status_block);
34f80b04 5208 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 5209
ca00392c
EG
5210 REG_WR(bp, BAR_CSTRORM_INTMEM +
5211 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5212 REG_WR(bp, BAR_CSTRORM_INTMEM +
5213 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 5214 U64_HI(section));
ca00392c
EG
5215 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5216 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
5217
5218 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
5219 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5220 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
5221
5222 /* CSTORM */
5223 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5224 c_def_status_block);
34f80b04 5225 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5226
5227 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5228 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 5229 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5230 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 5231 U64_HI(section));
5c862848 5232 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 5233 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
5234
5235 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5236 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5237 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
5238
5239 /* TSTORM */
5240 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5241 t_def_status_block);
34f80b04 5242 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5243
5244 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5245 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5246 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5247 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5248 U64_HI(section));
5c862848 5249 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 5250 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
5251
5252 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5253 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 5254 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
5255
5256 /* XSTORM */
5257 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5258 x_def_status_block);
34f80b04 5259 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
5260
5261 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5262 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5263 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5264 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5265 U64_HI(section));
5c862848 5266 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 5267 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
5268
5269 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5270 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 5271 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 5272
bb2a0f7a 5273 bp->stats_pending = 0;
66e855f3 5274 bp->set_mac_pending = 0;
bb2a0f7a 5275
34f80b04 5276 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
5277}
5278
5279static void bnx2x_update_coalesce(struct bnx2x *bp)
5280{
34f80b04 5281 int port = BP_PORT(bp);
a2fbb9ea
ET
5282 int i;
5283
5284 for_each_queue(bp, i) {
34f80b04 5285 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
5286
5287 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
5288 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5289 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5290 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5291 bp->rx_ticks/(4 * BNX2X_BTR));
ca00392c
EG
5292 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5293 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5294 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5295 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
5296
5297 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5298 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
5299 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5300 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5301 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 5302 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
5303 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5304 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5305 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
5306 }
5307}
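
The coalescing timeout above is programmed in units of 4*BNX2X_BTR ticks, and a result of zero flips the companion disable flag so host coalescing is turned off for that index. A worked example with an assumed BTR value:

#include <stdio.h>

#define BTR 4 /* assumed placeholder for BNX2X_BTR */

int main(void)
{
	unsigned int rx_ticks = 25;                  /* example setting */
	unsigned int timeout = rx_ticks / (4 * BTR); /* value for REG_WR8 */
	unsigned int hc_disable = timeout ? 0 : 1;   /* value for REG_WR16 */

	printf("timeout=%u disable=%u\n", timeout, hc_disable);
	return 0;
}
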
5308
7a9b2557
VZ
5309static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5310 struct bnx2x_fastpath *fp, int last)
5311{
5312 int i;
5313
5314 for (i = 0; i < last; i++) {
5315 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5316 struct sk_buff *skb = rx_buf->skb;
5317
5318 if (skb == NULL) {
5319 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5320 continue;
5321 }
5322
5323 if (fp->tpa_state[i] == BNX2X_TPA_START)
1a983142
FT
5324 dma_unmap_single(&bp->pdev->dev,
5325 dma_unmap_addr(rx_buf, mapping),
5326 bp->rx_buf_size, DMA_FROM_DEVICE);
7a9b2557
VZ
5327
5328 dev_kfree_skb(skb);
5329 rx_buf->skb = NULL;
5330 }
5331}
5332
a2fbb9ea
ET
5333static void bnx2x_init_rx_rings(struct bnx2x *bp)
5334{
7a9b2557 5335 int func = BP_FUNC(bp);
32626230
EG
5336 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5337 ETH_MAX_AGGREGATION_QUEUES_E1H;
5338 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 5339 int i, j;
a2fbb9ea 5340
87942b46 5341 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
5342 DP(NETIF_MSG_IFUP,
5343 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 5344
7a9b2557 5345 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 5346
54b9ddaa 5347 for_each_queue(bp, j) {
32626230 5348 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 5349
32626230 5350 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
5351 fp->tpa_pool[i].skb =
5352 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5353 if (!fp->tpa_pool[i].skb) {
5354 BNX2X_ERR("Failed to allocate TPA "
5355 "skb pool for queue[%d] - "
5356 "disabling TPA on this "
5357 "queue!\n", j);
5358 bnx2x_free_tpa_pool(bp, fp, i);
5359 fp->disable_tpa = 1;
5360 break;
5361 }
1a983142 5362 dma_unmap_addr_set((struct sw_rx_bd *)
7a9b2557
VZ
5363 &bp->fp->tpa_pool[i],
5364 mapping, 0);
5365 fp->tpa_state[i] = BNX2X_TPA_STOP;
5366 }
5367 }
5368 }
5369
54b9ddaa 5370 for_each_queue(bp, j) {
a2fbb9ea
ET
5371 struct bnx2x_fastpath *fp = &bp->fp[j];
5372
5373 fp->rx_bd_cons = 0;
5374 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
5375 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5376
5377 /* "next page" elements initialization */
5378 /* SGE ring */
5379 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5380 struct eth_rx_sge *sge;
5381
5382 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5383 sge->addr_hi =
5384 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5385 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5386 sge->addr_lo =
5387 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5388 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5389 }
5390
5391 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5392
7a9b2557 5393 /* RX BD ring */
a2fbb9ea
ET
5394 for (i = 1; i <= NUM_RX_RINGS; i++) {
5395 struct eth_rx_bd *rx_bd;
5396
5397 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5398 rx_bd->addr_hi =
5399 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5400 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5401 rx_bd->addr_lo =
5402 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5403 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5404 }
5405
34f80b04 5406 /* CQ ring */
a2fbb9ea
ET
5407 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5408 struct eth_rx_cqe_next_page *nextpg;
5409
5410 nextpg = (struct eth_rx_cqe_next_page *)
5411 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5412 nextpg->addr_hi =
5413 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5414 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5415 nextpg->addr_lo =
5416 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5417 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5418 }
5419
7a9b2557
VZ
5420 /* Allocate SGEs and initialize the ring elements */
5421 for (i = 0, ring_prod = 0;
5422 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5423
7a9b2557
VZ
5424 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5425 BNX2X_ERR("was only able to allocate "
5426 "%d rx sges\n", i);
5427 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5428 /* Cleanup already allocated elements */
5429 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5430 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5431 fp->disable_tpa = 1;
5432 ring_prod = 0;
5433 break;
5434 }
5435 ring_prod = NEXT_SGE_IDX(ring_prod);
5436 }
5437 fp->rx_sge_prod = ring_prod;
5438
5439 /* Allocate BDs and initialize BD ring */
66e855f3 5440 fp->rx_comp_cons = 0;
7a9b2557 5441 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5442 for (i = 0; i < bp->rx_ring_size; i++) {
5443 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5444 BNX2X_ERR("was only able to allocate "
de832a55
EG
5445 "%d rx skbs on queue[%d]\n", i, j);
5446 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5447 break;
5448 }
5449 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5450 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5451 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5452 }
5453
7a9b2557
VZ
5454 fp->rx_bd_prod = ring_prod;
5455 /* must not have more available CQEs than BDs */
cdaa7cb8
VZ
5456 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5457 cqe_ring_prod);
a2fbb9ea
ET
5458 fp->rx_pkt = fp->rx_calls = 0;
5459
7a9b2557
VZ
5460 /* Warning!
5461 * this will generate an interrupt (to the TSTORM);
5462 * it must only be done after the chip is initialized
5463 */
5464 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5465 fp->rx_sge_prod);
a2fbb9ea
ET
5466 if (j != 0)
5467 continue;
5468
5469 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5470 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5471 U64_LO(fp->rx_comp_mapping));
5472 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5473 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5474 U64_HI(fp->rx_comp_mapping));
5475 }
5476}
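
Each ring above reserves its final descriptors as "next page" pointers, so the hardware walks the pages in a loop: page i points at page i+1 and the last page wraps back to page 0. A standalone sketch of the address arithmetic, with PAGE_SZ and NPAGES standing in for BCM_PAGE_SIZE and NUM_RX_RINGS:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ 4096u
#define NPAGES  4u

int main(void)
{
	uint64_t base = 0x10000000ull; /* pretend DMA base of the ring */
	unsigned int i;

	for (i = 1; i <= NPAGES; i++)
		printf("page %u next -> 0x%llx\n", i - 1,
		       (unsigned long long)(base + PAGE_SZ * (i % NPAGES)));
	return 0;
}
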
5477
5478static void bnx2x_init_tx_ring(struct bnx2x *bp)
5479{
5480 int i, j;
5481
54b9ddaa 5482 for_each_queue(bp, j) {
a2fbb9ea
ET
5483 struct bnx2x_fastpath *fp = &bp->fp[j];
5484
5485 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5486 struct eth_tx_next_bd *tx_next_bd =
5487 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5488
ca00392c 5489 tx_next_bd->addr_hi =
a2fbb9ea 5490 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5491 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5492 tx_next_bd->addr_lo =
a2fbb9ea 5493 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5494 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5495 }
5496
ca00392c
EG
5497 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5498 fp->tx_db.data.zero_fill1 = 0;
5499 fp->tx_db.data.prod = 0;
5500
a2fbb9ea
ET
5501 fp->tx_pkt_prod = 0;
5502 fp->tx_pkt_cons = 0;
5503 fp->tx_bd_prod = 0;
5504 fp->tx_bd_cons = 0;
5505 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5506 fp->tx_pkt = 0;
5507 }
5508}
5509
5510static void bnx2x_init_sp_ring(struct bnx2x *bp)
5511{
34f80b04 5512 int func = BP_FUNC(bp);
a2fbb9ea
ET
5513
5514 spin_lock_init(&bp->spq_lock);
5515
5516 bp->spq_left = MAX_SPQ_PENDING;
5517 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5518 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5519 bp->spq_prod_bd = bp->spq;
5520 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5521
34f80b04 5522 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5523 U64_LO(bp->spq_mapping));
34f80b04
EG
5524 REG_WR(bp,
5525 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5526 U64_HI(bp->spq_mapping));
5527
34f80b04 5528 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5529 bp->spq_prod_idx);
5530}
5531
5532static void bnx2x_init_context(struct bnx2x *bp)
5533{
5534 int i;
5535
54b9ddaa
VZ
5536 /* Rx */
5537 for_each_queue(bp, i) {
a2fbb9ea
ET
5538 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5539 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5540 u8 cl_id = fp->cl_id;
a2fbb9ea 5541
34f80b04
EG
5542 context->ustorm_st_context.common.sb_index_numbers =
5543 BNX2X_RX_SB_INDEX_NUM;
0626b899 5544 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5545 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5546 context->ustorm_st_context.common.flags =
de832a55
EG
5547 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5548 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5549 context->ustorm_st_context.common.statistics_counter_id =
5550 cl_id;
8d9c5f34 5551 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5552 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5553 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5554 bp->rx_buf_size;
34f80b04 5555 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5556 U64_HI(fp->rx_desc_mapping);
34f80b04 5557 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5558 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5559 if (!fp->disable_tpa) {
5560 context->ustorm_st_context.common.flags |=
ca00392c 5561 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5562 context->ustorm_st_context.common.sge_buff_size =
cdaa7cb8
VZ
5563 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5564 0xffff);
7a9b2557
VZ
5565 context->ustorm_st_context.common.sge_page_base_hi =
5566 U64_HI(fp->rx_sge_mapping);
5567 context->ustorm_st_context.common.sge_page_base_lo =
5568 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5569
5570 context->ustorm_st_context.common.max_sges_for_packet =
5571 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5572 context->ustorm_st_context.common.max_sges_for_packet =
5573 ((context->ustorm_st_context.common.
5574 max_sges_for_packet + PAGES_PER_SGE - 1) &
5575 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5576 }
5577
8d9c5f34
EG
5578 context->ustorm_ag_context.cdu_usage =
5579 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5580 CDU_REGION_NUMBER_UCM_AG,
5581 ETH_CONNECTION_TYPE);
5582
ca00392c
EG
5583 context->xstorm_ag_context.cdu_reserved =
5584 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5585 CDU_REGION_NUMBER_XCM_AG,
5586 ETH_CONNECTION_TYPE);
5587 }
5588
54b9ddaa
VZ
5589 /* Tx */
5590 for_each_queue(bp, i) {
ca00392c
EG
5591 struct bnx2x_fastpath *fp = &bp->fp[i];
5592 struct eth_context *context =
54b9ddaa 5593 bnx2x_sp(bp, context[i].eth);
ca00392c
EG
5594
5595 context->cstorm_st_context.sb_index_number =
5596 C_SB_ETH_TX_CQ_INDEX;
5597 context->cstorm_st_context.status_block_id = fp->sb_id;
5598
8d9c5f34
EG
5599 context->xstorm_st_context.tx_bd_page_base_hi =
5600 U64_HI(fp->tx_desc_mapping);
5601 context->xstorm_st_context.tx_bd_page_base_lo =
5602 U64_LO(fp->tx_desc_mapping);
ca00392c 5603 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5604 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5605 }
5606}
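
max_sges_for_packet above is derived by rounding the page count of an MTU-sized frame up to a multiple of PAGES_PER_SGE and then dividing by it. A sketch of that rounding with assumed constants:

#include <stdio.h>

#define PAGES_PER_SGE       2u /* assumed */
#define PAGES_PER_SGE_SHIFT 1u

int main(void)
{
	unsigned int pages = 5; /* e.g. SGE_PAGE_ALIGN(mtu) >> SGE_PAGE_SHIFT */
	unsigned int sges = ((pages + PAGES_PER_SGE - 1) &
			     ~(PAGES_PER_SGE - 1)) >> PAGES_PER_SGE_SHIFT;

	printf("%u pages -> %u SGEs\n", pages, sges);
	return 0;
}
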
5607
5608static void bnx2x_init_ind_table(struct bnx2x *bp)
5609{
26c8fa4d 5610 int func = BP_FUNC(bp);
a2fbb9ea
ET
5611 int i;
5612
555f6c78 5613 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5614 return;
5615
555f6c78
EG
5616 DP(NETIF_MSG_IFUP,
5617 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5618 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5619 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5620 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 5621 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
5622}
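
The indirection table above is filled round-robin: an RSS hash picks one of the table slots, and slot i steers traffic to client cl_id + (i % num_queues). A sketch with assumed sizes (the real table size comes from TSTORM_INDIRECTION_TABLE_SIZE):

#include <stdio.h>

#define TABLE_SIZE 128 /* assumed table size */
#define NUM_QUEUES 4
#define BASE_CL_ID 0

int main(void)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		printf("slot %3d -> client %d\n", i,
		       BASE_CL_ID + (i % NUM_QUEUES));
	return 0;
}
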
5623
49d66772
ET
5624static void bnx2x_set_client_config(struct bnx2x *bp)
5625{
49d66772 5626 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5627 int port = BP_PORT(bp);
5628 int i;
49d66772 5629
e7799c5f 5630 tstorm_client.mtu = bp->dev->mtu;
49d66772 5631 tstorm_client.config_flags =
de832a55
EG
5632 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5633 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5634#ifdef BCM_VLAN
0c6671b0 5635 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5636 tstorm_client.config_flags |=
8d9c5f34 5637 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5638 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5639 }
5640#endif
49d66772
ET
5641
5642 for_each_queue(bp, i) {
de832a55
EG
5643 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5644
49d66772 5645 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5646 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5647 ((u32 *)&tstorm_client)[0]);
5648 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5649 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5650 ((u32 *)&tstorm_client)[1]);
5651 }
5652
34f80b04
EG
5653 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5654 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5655}
5656
a2fbb9ea
ET
5657static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5658{
a2fbb9ea 5659 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5660 int mode = bp->rx_mode;
37b091ba 5661 int mask = bp->rx_mode_cl_mask;
34f80b04 5662 int func = BP_FUNC(bp);
581ce43d 5663 int port = BP_PORT(bp);
a2fbb9ea 5664 int i;
581ce43d
EG
5665 /* All but management unicast packets should pass to the host as well */
5666 u32 llh_mask =
5667 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5668 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5669 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5670 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5671
3196a88a 5672 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5673
5674 switch (mode) {
5675 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5676 tstorm_mac_filter.ucast_drop_all = mask;
5677 tstorm_mac_filter.mcast_drop_all = mask;
5678 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5679 break;
356e2385 5680
a2fbb9ea 5681 case BNX2X_RX_MODE_NORMAL:
34f80b04 5682 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5683 break;
356e2385 5684
a2fbb9ea 5685 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5686 tstorm_mac_filter.mcast_accept_all = mask;
5687 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5688 break;
356e2385 5689
a2fbb9ea 5690 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5691 tstorm_mac_filter.ucast_accept_all = mask;
5692 tstorm_mac_filter.mcast_accept_all = mask;
5693 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5694 /* pass management unicast packets as well */
5695 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5696 break;
356e2385 5697
a2fbb9ea 5698 default:
34f80b04
EG
5699 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5700 break;
a2fbb9ea
ET
5701 }
5702
581ce43d
EG
5703 REG_WR(bp,
5704 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5705 llh_mask);
5706
a2fbb9ea
ET
5707 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5708 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5709 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5710 ((u32 *)&tstorm_mac_filter)[i]);
5711
34f80b04 5712/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5713 ((u32 *)&tstorm_mac_filter)[i]); */
5714 }
a2fbb9ea 5715
49d66772
ET
5716 if (mode != BNX2X_RX_MODE_NONE)
5717 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5718}
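
As a compact summary of the switch above, a sketch of which accept-all flags each rx mode sets (the real tstorm_eth_mac_filter_config also carries the drop-all fields that BNX2X_RX_MODE_NONE uses, and PROMISC additionally opens the NIG LLH unicast mask):

struct accept_flags { int ucast, mcast, bcast; };

static const struct accept_flags rx_mode_accept[] = {
	/* BNX2X_RX_MODE_NONE     */ { 0, 0, 0 }, /* drop-all masks set instead */
	/* BNX2X_RX_MODE_NORMAL   */ { 0, 0, 1 },
	/* BNX2X_RX_MODE_ALLMULTI */ { 0, 1, 1 },
	/* BNX2X_RX_MODE_PROMISC  */ { 1, 1, 1 },
};
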
5719
471de716
EG
5720static void bnx2x_init_internal_common(struct bnx2x *bp)
5721{
5722 int i;
5723
5724 /* Zero this manually as its initialization is
5725 currently missing in the initTool */
5726 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5727 REG_WR(bp, BAR_USTRORM_INTMEM +
5728 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5729}
5730
5731static void bnx2x_init_internal_port(struct bnx2x *bp)
5732{
5733 int port = BP_PORT(bp);
5734
ca00392c
EG
5735 REG_WR(bp,
5736 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5737 REG_WR(bp,
5738 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5739 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5740 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5741}
5742
5743static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5744{
a2fbb9ea
ET
5745 struct tstorm_eth_function_common_config tstorm_config = {0};
5746 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5747 int port = BP_PORT(bp);
5748 int func = BP_FUNC(bp);
de832a55
EG
5749 int i, j;
5750 u32 offset;
471de716 5751 u16 max_agg_size;
a2fbb9ea
ET
5752
5753 if (is_multi(bp)) {
555f6c78 5754 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5755 tstorm_config.rss_result_mask = MULTI_MASK;
5756 }
ca00392c
EG
5757
5758 /* Enable TPA if needed */
5759 if (bp->flags & TPA_ENABLE_FLAG)
5760 tstorm_config.config_flags |=
5761 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5762
8d9c5f34
EG
5763 if (IS_E1HMF(bp))
5764 tstorm_config.config_flags |=
5765 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5766
34f80b04
EG
5767 tstorm_config.leading_client_id = BP_L_ID(bp);
5768
a2fbb9ea 5769 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5770 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5771 (*(u32 *)&tstorm_config));
5772
c14423fe 5773 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5774 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
5775 bnx2x_set_storm_rx_mode(bp);
5776
de832a55
EG
5777 for_each_queue(bp, i) {
5778 u8 cl_id = bp->fp[i].cl_id;
5779
5780 /* reset xstorm per client statistics */
5781 offset = BAR_XSTRORM_INTMEM +
5782 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5783 for (j = 0;
5784 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5785 REG_WR(bp, offset + j*4, 0);
5786
5787 /* reset tstorm per client statistics */
5788 offset = BAR_TSTRORM_INTMEM +
5789 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5790 for (j = 0;
5791 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5792 REG_WR(bp, offset + j*4, 0);
5793
5794 /* reset ustorm per client statistics */
5795 offset = BAR_USTRORM_INTMEM +
5796 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5797 for (j = 0;
5798 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5799 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5800 }
5801
5802 /* Init statistics related context */
34f80b04 5803 stats_flags.collect_eth = 1;
a2fbb9ea 5804
66e855f3 5805 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5806 ((u32 *)&stats_flags)[0]);
66e855f3 5807 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5808 ((u32 *)&stats_flags)[1]);
5809
66e855f3 5810 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5811 ((u32 *)&stats_flags)[0]);
66e855f3 5812 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5813 ((u32 *)&stats_flags)[1]);
5814
de832a55
EG
5815 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5816 ((u32 *)&stats_flags)[0]);
5817 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5818 ((u32 *)&stats_flags)[1]);
5819
66e855f3 5820 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5821 ((u32 *)&stats_flags)[0]);
66e855f3 5822 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5823 ((u32 *)&stats_flags)[1]);
5824
66e855f3
YG
5825 REG_WR(bp, BAR_XSTRORM_INTMEM +
5826 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5827 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5828 REG_WR(bp, BAR_XSTRORM_INTMEM +
5829 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5830 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5831
5832 REG_WR(bp, BAR_TSTRORM_INTMEM +
5833 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5834 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5835 REG_WR(bp, BAR_TSTRORM_INTMEM +
5836 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5837 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5838
de832a55
EG
5839 REG_WR(bp, BAR_USTRORM_INTMEM +
5840 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5841 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5842 REG_WR(bp, BAR_USTRORM_INTMEM +
5843 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5844 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5845
34f80b04
EG
5846 if (CHIP_IS_E1H(bp)) {
5847 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5848 IS_E1HMF(bp));
5849 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5850 IS_E1HMF(bp));
5851 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5852 IS_E1HMF(bp));
5853 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5854 IS_E1HMF(bp));
5855
7a9b2557
VZ
5856 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5857 bp->e1hov);
34f80b04
EG
5858 }
5859
4f40f2cb 5860 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
cdaa7cb8
VZ
5861 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5862 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
54b9ddaa 5863 for_each_queue(bp, i) {
7a9b2557 5864 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5865
5866 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5867 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5868 U64_LO(fp->rx_comp_mapping));
5869 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5870 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5871 U64_HI(fp->rx_comp_mapping));
5872
ca00392c
EG
5873 /* Next page */
5874 REG_WR(bp, BAR_USTRORM_INTMEM +
5875 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5876 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5877 REG_WR(bp, BAR_USTRORM_INTMEM +
5878 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5879 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5880
7a9b2557 5881 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5882 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5883 max_agg_size);
5884 }
8a1c38d1 5885
1c06328c
EG
5886 /* dropless flow control */
5887 if (CHIP_IS_E1H(bp)) {
5888 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5889
5890 rx_pause.bd_thr_low = 250;
5891 rx_pause.cqe_thr_low = 250;
5892 rx_pause.cos = 1;
5893 rx_pause.sge_thr_low = 0;
5894 rx_pause.bd_thr_high = 350;
5895 rx_pause.cqe_thr_high = 350;
5896 rx_pause.sge_thr_high = 0;
5897
54b9ddaa 5898 for_each_queue(bp, i) {
1c06328c
EG
5899 struct bnx2x_fastpath *fp = &bp->fp[i];
5900
5901 if (!fp->disable_tpa) {
5902 rx_pause.sge_thr_low = 150;
5903 rx_pause.sge_thr_high = 250;
5904 }
5905
5906
5907 offset = BAR_USTRORM_INTMEM +
5908 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5909 fp->cl_id);
5910 for (j = 0;
5911 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5912 j++)
5913 REG_WR(bp, offset + j*4,
5914 ((u32 *)&rx_pause)[j]);
5915 }
5916 }
5917
8a1c38d1
EG
5918 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5919
5920 /* Init rate shaping and fairness contexts */
5921 if (IS_E1HMF(bp)) {
5922 int vn;
5923
5924 /* During init there is no active link;
5925 until link is up, set the link rate to 10Gbps */
5926 bp->link_vars.line_speed = SPEED_10000;
5927 bnx2x_init_port_minmax(bp);
5928
b015e3d1
EG
5929 if (!BP_NOMCP(bp))
5930 bp->mf_config =
5931 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
5932 bnx2x_calc_vn_weight_sum(bp);
5933
5934 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5935 bnx2x_init_vn_minmax(bp, 2*vn + port);
5936
5937 /* Enable rate shaping and fairness */
b015e3d1 5938 bp->cmng.flags.cmng_enables |=
8a1c38d1 5939 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5940
8a1c38d1
EG
5941 } else {
5942 /* rate shaping and fairness are disabled */
5943 DP(NETIF_MSG_IFUP,
5944 "single function mode minmax will be disabled\n");
5945 }
5946
5947
cdaa7cb8 5948 /* Store cmng structures to internal memory */
8a1c38d1
EG
5949 if (bp->port.pmf)
5950 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5951 REG_WR(bp, BAR_XSTRORM_INTMEM +
5952 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5953 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5954}
5955
471de716
EG
5956static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5957{
5958 switch (load_code) {
5959 case FW_MSG_CODE_DRV_LOAD_COMMON:
5960 bnx2x_init_internal_common(bp);
5961 /* no break */
5962
5963 case FW_MSG_CODE_DRV_LOAD_PORT:
5964 bnx2x_init_internal_port(bp);
5965 /* no break */
5966
5967 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5968 bnx2x_init_internal_func(bp);
5969 break;
5970
5971 default:
5972 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5973 break;
5974 }
5975}
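
The "no break" comments above mark deliberate fall-through: the load codes nest, so a COMMON load runs common, port and function init in that order, while a FUNCTION load runs only the last stage. A self-contained illustration of the same staging pattern (names here are illustrative, not driver symbols):

#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_internal(enum load_code code)
{
	switch (code) {
	case LOAD_COMMON:
		puts("common init");
		/* no break: a common load also does port init */
	case LOAD_PORT:
		puts("port init");
		/* no break: a port load also does function init */
	case LOAD_FUNCTION:
		puts("function init");
		break;
	}
}

int main(void)
{
	init_internal(LOAD_COMMON); /* prints all three stages */
	return 0;
}
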
5976
5977static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5978{
5979 int i;
5980
5981 for_each_queue(bp, i) {
5982 struct bnx2x_fastpath *fp = &bp->fp[i];
5983
34f80b04 5984 fp->bp = bp;
a2fbb9ea 5985 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5986 fp->index = i;
34f80b04 5987 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
5988#ifdef BCM_CNIC
5989 fp->sb_id = fp->cl_id + 1;
5990#else
34f80b04 5991 fp->sb_id = fp->cl_id;
37b091ba 5992#endif
34f80b04 5993 DP(NETIF_MSG_IFUP,
f5372251
EG
5994 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5995 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5996 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5997 fp->sb_id);
5c862848 5998 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5999 }
6000
16119785
EG
6001 /* ensure status block indices were read */
6002 rmb();
6003
6004
5c862848
EG
6005 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6006 DEF_SB_ID);
6007 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
6008 bnx2x_update_coalesce(bp);
6009 bnx2x_init_rx_rings(bp);
6010 bnx2x_init_tx_ring(bp);
6011 bnx2x_init_sp_ring(bp);
6012 bnx2x_init_context(bp);
471de716 6013 bnx2x_init_internal(bp, load_code);
a2fbb9ea 6014 bnx2x_init_ind_table(bp);
0ef00459
EG
6015 bnx2x_stats_init(bp);
6016
6017 /* At this point, we are ready for interrupts */
6018 atomic_set(&bp->intr_sem, 0);
6019
6020 /* flush all before enabling interrupts */
6021 mb();
6022 mmiowb();
6023
615f8fd9 6024 bnx2x_int_enable(bp);
eb8da205
EG
6025
6026 /* Check for SPIO5 */
6027 bnx2x_attn_int_deasserted0(bp,
6028 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6029 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
6030}
6031
6032/* end of nic init */
6033
6034/*
6035 * gzip service functions
6036 */
6037
6038static int bnx2x_gunzip_init(struct bnx2x *bp)
6039{
1a983142
FT
6040 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6041 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
6042 if (bp->gunzip_buf == NULL)
6043 goto gunzip_nomem1;
6044
6045 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6046 if (bp->strm == NULL)
6047 goto gunzip_nomem2;
6048
6049 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6050 GFP_KERNEL);
6051 if (bp->strm->workspace == NULL)
6052 goto gunzip_nomem3;
6053
6054 return 0;
6055
6056gunzip_nomem3:
6057 kfree(bp->strm);
6058 bp->strm = NULL;
6059
6060gunzip_nomem2:
1a983142
FT
6061 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6062 bp->gunzip_mapping);
a2fbb9ea
ET
6063 bp->gunzip_buf = NULL;
6064
6065gunzip_nomem1:
cdaa7cb8
VZ
6066 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6067 " decompression\n");
a2fbb9ea
ET
6068 return -ENOMEM;
6069}
6070
6071static void bnx2x_gunzip_end(struct bnx2x *bp)
6072{
6073 kfree(bp->strm->workspace);
6074
6075 kfree(bp->strm);
6076 bp->strm = NULL;
6077
6078 if (bp->gunzip_buf) {
1a983142
FT
6079 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6080 bp->gunzip_mapping);
a2fbb9ea
ET
6081 bp->gunzip_buf = NULL;
6082 }
6083}
6084
94a78b79 6085static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
6086{
6087 int n, rc;
6088
6089 /* check gzip header */
94a78b79
VZ
6090 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6091 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 6092 return -EINVAL;
94a78b79 6093 }
a2fbb9ea
ET
6094
6095 n = 10;
6096
34f80b04 6097#define FNAME 0x8
a2fbb9ea
ET
6098
6099 if (zbuf[3] & FNAME)
6100 while ((zbuf[n++] != 0) && (n < len));
6101
94a78b79 6102 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
6103 bp->strm->avail_in = len - n;
6104 bp->strm->next_out = bp->gunzip_buf;
6105 bp->strm->avail_out = FW_BUF_SIZE;
6106
6107 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6108 if (rc != Z_OK)
6109 return rc;
6110
6111 rc = zlib_inflate(bp->strm, Z_FINISH);
6112 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
6113 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6114 bp->strm->msg);
a2fbb9ea
ET
6115
6116 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6117 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
6118 netdev_err(bp->dev, "Firmware decompression error:"
6119 " gunzip_outlen (%d) not aligned\n",
6120 bp->gunzip_outlen);
a2fbb9ea
ET
6121 bp->gunzip_outlen >>= 2;
6122
6123 zlib_inflateEnd(bp->strm);
6124
6125 if (rc == Z_STREAM_END)
6126 return 0;
6127
6128 return rc;
6129}
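
The header handling above skips the 10-byte fixed gzip header plus an optional NUL-terminated file name when the FNAME flag (bit 3 of the flags byte) is set, then inflates with a raw window (-MAX_WBITS); other optional gzip fields are not expected in the firmware images. A standalone sketch of the offset calculation, under those same assumptions:

#include <stddef.h>

#define GZ_FNAME 0x8

static ptrdiff_t gzip_payload_offset(const unsigned char *z, size_t len)
{
	size_t n = 10; /* fixed gzip header */

	if (len < 10 || z[0] != 0x1f || z[1] != 0x8b)
		return -1; /* not a gzip stream */
	if (z[3] & GZ_FNAME)
		while (n < len && z[n++] != 0)
			; /* skip the embedded file name */
	return (ptrdiff_t)n;
}
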
6130
6131/* nic load/unload */
6132
6133/*
34f80b04 6134 * General service functions
a2fbb9ea
ET
6135 */
6136
6137/* send a NIG loopback debug packet */
6138static void bnx2x_lb_pckt(struct bnx2x *bp)
6139{
a2fbb9ea 6140 u32 wb_write[3];
a2fbb9ea
ET
6141
6142 /* Ethernet source and destination addresses */
a2fbb9ea
ET
6143 wb_write[0] = 0x55555555;
6144 wb_write[1] = 0x55555555;
34f80b04 6145 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 6146 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6147
6148 /* NON-IP protocol */
a2fbb9ea
ET
6149 wb_write[0] = 0x09000000;
6150 wb_write[1] = 0x55555555;
34f80b04 6151 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 6152 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6153}
6154
6155/* some of the internal memories
6156 * are not directly readable from the driver,
6157 * so to test them we send debug packets
6158 */
6159static int bnx2x_int_mem_test(struct bnx2x *bp)
6160{
6161 int factor;
6162 int count, i;
6163 u32 val = 0;
6164
ad8d3948 6165 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 6166 factor = 120;
ad8d3948
EG
6167 else if (CHIP_REV_IS_EMUL(bp))
6168 factor = 200;
6169 else
a2fbb9ea 6170 factor = 1;
a2fbb9ea
ET
6171
6172 DP(NETIF_MSG_HW, "start part1\n");
6173
6174 /* Disable inputs of parser neighbor blocks */
6175 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6176 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6177 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6178 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6179
6180 /* Write 0 to parser credits for CFC search request */
6181 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6182
6183 /* send Ethernet packet */
6184 bnx2x_lb_pckt(bp);
6185
6186 /* TODO: do we need to reset the NIG statistic? */
6187 /* Wait until NIG register shows 1 packet of size 0x10 */
6188 count = 1000 * factor;
6189 while (count) {
34f80b04 6190
a2fbb9ea
ET
6191 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6192 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6193 if (val == 0x10)
6194 break;
6195
6196 msleep(10);
6197 count--;
6198 }
6199 if (val != 0x10) {
6200 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6201 return -1;
6202 }
6203
6204 /* Wait until PRS register shows 1 packet */
6205 count = 1000 * factor;
6206 while (count) {
6207 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
6208 if (val == 1)
6209 break;
6210
6211 msleep(10);
6212 count--;
6213 }
6214 if (val != 0x1) {
6215 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6216 return -2;
6217 }
6218
6219 /* Reset and init BRB, PRS */
34f80b04 6220 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 6221 msleep(50);
34f80b04 6222 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 6223 msleep(50);
94a78b79
VZ
6224 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6225 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
6226
6227 DP(NETIF_MSG_HW, "part2\n");
6228
6229 /* Disable inputs of parser neighbor blocks */
6230 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6231 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6232 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6233 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6234
6235 /* Write 0 to parser credits for CFC search request */
6236 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6237
6238 /* send 10 Ethernet packets */
6239 for (i = 0; i < 10; i++)
6240 bnx2x_lb_pckt(bp);
6241
6242 /* Wait until NIG register shows 10 + 1
6243 packets of size 11*0x10 = 0xb0 */
6244 count = 1000 * factor;
6245 while (count) {
34f80b04 6246
a2fbb9ea
ET
6247 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6248 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6249 if (val == 0xb0)
6250 break;
6251
6252 msleep(10);
6253 count--;
6254 }
6255 if (val != 0xb0) {
6256 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6257 return -3;
6258 }
6259
6260 /* Wait until PRS register shows 2 packets */
6261 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6262 if (val != 2)
6263 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6264
6265 /* Write 1 to parser credits for CFC search request */
6266 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6267
6268 /* Wait until PRS register shows 3 packets */
6269 msleep(10 * factor);
6270 /* Wait until NIG register shows 1 packet of size 0x10 */
6271 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6272 if (val != 3)
6273 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6274
6275 /* clear NIG EOP FIFO */
6276 for (i = 0; i < 11; i++)
6277 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6278 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6279 if (val != 1) {
6280 BNX2X_ERR("clear of NIG failed\n");
6281 return -4;
6282 }
6283
6284 /* Reset and init BRB, PRS, NIG */
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6286 msleep(50);
6287 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6288 msleep(50);
94a78b79
VZ
6289 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6290 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 6291#ifndef BCM_CNIC
a2fbb9ea
ET
6292 /* set NIC mode */
6293 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6294#endif
6295
6296 /* Enable inputs of parser neighbor blocks */
6297 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6298 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6299 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 6300 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
6301
6302 DP(NETIF_MSG_HW, "done\n");
6303
6304 return 0; /* OK */
6305}
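
The memory test above repeats one shape of loop: re-read a counter up to 1000*factor times with a 10 ms sleep between reads, then fail if the expected value never shows up. A generic user-space sketch of that poll pattern:

#include <stdbool.h>

/* re-read a value up to 'count' times; the driver sleeps 10 ms per pass */
static bool poll_until(unsigned int (*read_val)(void *ctx), void *ctx,
		       unsigned int want, int count)
{
	while (count--) {
		if (read_val(ctx) == want)
			return true;
		/* msleep(10) in the driver */
	}
	return false;
}
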
6306
6307static void enable_blocks_attention(struct bnx2x *bp)
6308{
6309 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6310 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6311 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6312 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6313 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6314 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6315 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6316 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6317 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
6318/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6319/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6320 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6321 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6322 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
6323/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6324/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6325 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6326 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6327 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6328 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
6329/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6330/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6331 if (CHIP_REV_IS_FPGA(bp))
6332 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6333 else
6334 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
6335 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6336 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6337 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
6338/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6339/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6340 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6341 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
6342/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6343 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
6344}
6345
72fd0718
VZ
6346static const struct {
6347 u32 addr;
6348 u32 mask;
6349} bnx2x_parity_mask[] = {
6350 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6351 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6352 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6353 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6354 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6355 {QM_REG_QM_PRTY_MASK, 0x0},
6356 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6357 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6358 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6359 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6360 {CDU_REG_CDU_PRTY_MASK, 0x0},
6361 {CFC_REG_CFC_PRTY_MASK, 0x0},
6362 {DBG_REG_DBG_PRTY_MASK, 0x0},
6363 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6364 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6365 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6366 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6367 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6369 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6370 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6371 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6372 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6373 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6374 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6375 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6376 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6377 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6378};
6379
6380static void enable_blocks_parity(struct bnx2x *bp)
6381{
6382 int i, mask_arr_len =
6383 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6384
6385 for (i = 0; i < mask_arr_len; i++)
6386 REG_WR(bp, bnx2x_parity_mask[i].addr,
6387 bnx2x_parity_mask[i].mask);
6388}
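
The element count above is the open-coded form of the kernel's ARRAY_SIZE() helper; a user-space equivalent for reference:

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))
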
6389
34f80b04 6390
81f75bbf
EG
6391static void bnx2x_reset_common(struct bnx2x *bp)
6392{
6393 /* reset_common */
6394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6395 0xd3ffff7f);
6396 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6397}
6398
573f2035
EG
6399static void bnx2x_init_pxp(struct bnx2x *bp)
6400{
6401 u16 devctl;
6402 int r_order, w_order;
6403
6404 pci_read_config_word(bp->pdev,
6405 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6406 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6407 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6408 if (bp->mrrs == -1)
6409 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6410 else {
6411 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6412 r_order = bp->mrrs;
6413 }
6414
6415 bnx2x_init_pxp_arb(bp, r_order, w_order);
6416}
fd4ef40d
EG
6417
6418static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6419{
2145a920 6420 int is_required;
fd4ef40d 6421 u32 val;
2145a920 6422 int port;
fd4ef40d 6423
2145a920
VZ
6424 if (BP_NOMCP(bp))
6425 return;
6426
6427 is_required = 0;
fd4ef40d
EG
6428 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6429 SHARED_HW_CFG_FAN_FAILURE_MASK;
6430
6431 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6432 is_required = 1;
6433
6434 /*
6435 * The fan failure mechanism is usually related to the PHY type since
6436 * the power consumption of the board is affected by the PHY. Currently,
6437 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6438 */
6439 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6440 for (port = PORT_0; port < PORT_MAX; port++) {
6441 u32 phy_type =
6442 SHMEM_RD(bp, dev_info.port_hw_config[port].
6443 external_phy_config) &
6444 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6445 is_required |=
6446 ((phy_type ==
6447 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
6448 (phy_type ==
6449 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
6450 (phy_type ==
6451 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6452 }
6453
6454 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6455
6456 if (is_required == 0)
6457 return;
6458
6459 /* Fan failure is indicated by SPIO 5 */
6460 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6461 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6462
6463 /* set to active low mode */
6464 val = REG_RD(bp, MISC_REG_SPIO_INT);
6465 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 6466 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
6467 REG_WR(bp, MISC_REG_SPIO_INT, val);
6468
6469 /* enable interrupt to signal the IGU */
6470 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6471 val |= (1 << MISC_REGISTERS_SPIO_5);
6472 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6473}
6474
34f80b04 6475static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6476{
a2fbb9ea 6477 u32 val, i;
37b091ba
MC
6478#ifdef BCM_CNIC
6479 u32 wb_write[2];
6480#endif
a2fbb9ea 6481
34f80b04 6482 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6483
81f75bbf 6484 bnx2x_reset_common(bp);
34f80b04
EG
6485 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6486 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6487
94a78b79 6488 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6489 if (CHIP_IS_E1H(bp))
6490 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6491
34f80b04
EG
6492 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6493 msleep(30);
6494 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6495
94a78b79 6496 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6497 if (CHIP_IS_E1(bp)) {
6498 /* enable HW interrupt from PXP on USDM overflow,
6499 bit 16 of INT_MASK_0 */
6500 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6501 }
a2fbb9ea 6502
94a78b79 6503 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6504 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6505
6506#ifdef __BIG_ENDIAN
34f80b04
EG
6507 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6508 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6509 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6510 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6511 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6512 /* make sure this value is 0 */
6513 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6514
6515/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6516 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6517 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6518 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6519 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6520#endif
6521
34f80b04 6522 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6523#ifdef BCM_CNIC
34f80b04
EG
6524 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6525 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6526 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6527#endif
6528
34f80b04
EG
6529 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6530 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6531
34f80b04
EG
6532 /* let the HW do its magic ... */
6533 msleep(100);
6534 /* finish PXP init */
6535 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6536 if (val != 1) {
6537 BNX2X_ERR("PXP2 CFG failed\n");
6538 return -EBUSY;
6539 }
6540 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6541 if (val != 1) {
6542 BNX2X_ERR("PXP2 RD_INIT failed\n");
6543 return -EBUSY;
6544 }
a2fbb9ea 6545
34f80b04
EG
6546 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6547 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6548
94a78b79 6549 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6550
34f80b04
EG
6551 /* clean the DMAE memory */
6552 bp->dmae_ready = 1;
6553 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6554
94a78b79
VZ
6555 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6556 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6557 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6558 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6559
34f80b04
EG
6560 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6561 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6562 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6563 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6564
94a78b79 6565 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6566
6567#ifdef BCM_CNIC
6568 wb_write[0] = 0;
6569 wb_write[1] = 0;
6570 for (i = 0; i < 64; i++) {
6571 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6572 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6573
6574 if (CHIP_IS_E1H(bp)) {
6575 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6576 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6577 wb_write, 2);
6578 }
6579 }
6580#endif
34f80b04
EG
6581 /* soft reset pulse */
6582 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6583 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6584
37b091ba 6585#ifdef BCM_CNIC
94a78b79 6586 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6587#endif
a2fbb9ea 6588
94a78b79 6589 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6590 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6591 if (!CHIP_REV_IS_SLOW(bp)) {
6592 /* enable hw interrupt from doorbell Q */
6593 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6594 }
a2fbb9ea 6595
94a78b79
VZ
6596 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6597 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6598 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6599#ifndef BCM_CNIC
3196a88a
EG
6600 /* set NIC mode */
6601 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6602#endif
34f80b04
EG
6603 if (CHIP_IS_E1H(bp))
6604 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6605
94a78b79
VZ
6606 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6607 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6608 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6609 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6610
ca00392c
EG
6611 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6612 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6613 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6614 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6615
94a78b79
VZ
6616 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6617 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6618 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6619 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6620
34f80b04
EG
6621 /* sync semi rtc */
6622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6623 0x80000000);
6624 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6625 0x80000000);
a2fbb9ea 6626
94a78b79
VZ
6627 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6628 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6629 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6630
34f80b04
EG
6631 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6632 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6633 REG_WR(bp, i, 0xc0cac01a);
6634 /* TODO: replace with something meaningful */
6635 }
94a78b79 6636 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6637#ifdef BCM_CNIC
6638 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6639 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6640 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6641 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6642 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6643 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6644 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6645 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6646 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6647 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6648#endif
34f80b04 6649 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6650
34f80b04
EG
6651 if (sizeof(union cdu_context) != 1024)
6652 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
6653 dev_alert(&bp->pdev->dev, "please adjust the size "
6654 "of cdu_context(%ld)\n",
7995c64e 6655 (long)sizeof(union cdu_context));
a2fbb9ea 6656
94a78b79 6657 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6658 val = (4 << 24) + (0 << 12) + 1024;
6659 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6660
94a78b79 6661 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6662 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6663 /* enable context validation interrupt from CFC */
6664 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6665
6666 /* set the thresholds to prevent CFC/CDU race */
6667 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6668
94a78b79
VZ
6669 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6670 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6671
94a78b79 6672 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6673 /* Reset PCIE errors for debug */
6674 REG_WR(bp, 0x2814, 0xffffffff);
6675 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6676
94a78b79 6677 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6678 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6679 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6680 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6681
94a78b79 6682 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6683 if (CHIP_IS_E1H(bp)) {
6684 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6685 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6686 }
6687
6688 if (CHIP_REV_IS_SLOW(bp))
6689 msleep(200);
6690
6691 /* finish CFC init */
6692 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6693 if (val != 1) {
6694 BNX2X_ERR("CFC LL_INIT failed\n");
6695 return -EBUSY;
6696 }
6697 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6698 if (val != 1) {
6699 BNX2X_ERR("CFC AC_INIT failed\n");
6700 return -EBUSY;
6701 }
6702 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6703 if (val != 1) {
6704 BNX2X_ERR("CFC CAM_INIT failed\n");
6705 return -EBUSY;
6706 }
6707 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6708
34f80b04
EG
6709 /* read NIG statistic
6710 to see if this is our first load since power-up */
6711 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6712 val = *bnx2x_sp(bp, wb_data[0]);
6713
6714 /* do internal memory self test */
6715 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6716 BNX2X_ERR("internal mem self test failed\n");
6717 return -EBUSY;
6718 }
6719
35b19ba5 6720 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6721 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6723 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6724 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6725 bp->port.need_hw_lock = 1;
6726 break;
6727
34f80b04
EG
6728 default:
6729 break;
6730 }
f1410647 6731
fd4ef40d
EG
6732 bnx2x_setup_fan_failure_detection(bp);
6733
34f80b04
EG
6734 /* clear PXP2 attentions */
6735 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6736
34f80b04 6737 enable_blocks_attention(bp);
72fd0718
VZ
6738 if (CHIP_PARITY_SUPPORTED(bp))
6739 enable_blocks_parity(bp);
a2fbb9ea 6740
6bbca910
YR
6741 if (!BP_NOMCP(bp)) {
6742 bnx2x_acquire_phy_lock(bp);
6743 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6744 bnx2x_release_phy_lock(bp);
6745 } else
6746 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6747
34f80b04
EG
6748 return 0;
6749}
a2fbb9ea 6750
34f80b04
EG
6751static int bnx2x_init_port(struct bnx2x *bp)
6752{
6753 int port = BP_PORT(bp);
94a78b79 6754 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6755 u32 low, high;
34f80b04 6756 u32 val;
a2fbb9ea 6757
cdaa7cb8 6758 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
6759
6760 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6761
94a78b79 6762 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6763 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6764
6765 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6766 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6767 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6768 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6769
37b091ba
MC
6770#ifdef BCM_CNIC
6771 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6772
94a78b79 6773 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6774 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6775 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6776#endif
cdaa7cb8 6777
94a78b79 6778 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6779
94a78b79 6780 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6781 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6782 /* no pause for emulation and FPGA */
6783 low = 0;
6784 high = 513;
6785 } else {
6786 if (IS_E1HMF(bp))
6787 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6788 else if (bp->dev->mtu > 4096) {
6789 if (bp->flags & ONE_PORT_FLAG)
6790 low = 160;
6791 else {
6792 val = bp->dev->mtu;
6793 /* (24*1024 + val*4)/256 */
6794 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6795 }
6796 } else
6797 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6798 high = low + 56; /* 14*1024/256 */
6799 }
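 /* (Worked example - editor's note, assuming the thresholds are in the
  * 256-byte units the formula above implies: for mtu = 9000 on a
  * two-port device, low = 96 + 9000/64 + 1 = 237, matching
  * (24*1024 + 9000*4)/256 rounded up, and high = 237 + 56 = 293.)
  */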
6800 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6801 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6802
6803
94a78b79 6804 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6805
94a78b79 6806 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6807 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6808 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6809 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6810
94a78b79
VZ
6811 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6812 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6813 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6814 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6815
94a78b79 6816 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6817 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6818
94a78b79 6819 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6820
6821 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6822 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6823
6824 /* update threshold */
34f80b04 6825 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6826 /* update init credit */
34f80b04 6827 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6828
6829 /* probe changes */
34f80b04 6830 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6831 msleep(5);
34f80b04 6832 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6833
37b091ba
MC
6834#ifdef BCM_CNIC
6835 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6836#endif
94a78b79 6837 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6838 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6839
6840 if (CHIP_IS_E1(bp)) {
6841 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6842 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6843 }
94a78b79 6844 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6845
94a78b79 6846 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6847 /* init aeu_mask_attn_func_0/1:
6848 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6849 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6850 * bits 4-7 are used for "per vn group attention" */
6851 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6852 (IS_E1HMF(bp) ? 0xF7 : 0x7));
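 /* (Editor's note on the values above: 0xF7 = 11110111b masks only
  * bit 3 in MF mode, keeping bits 0-2 and the per-vn bits 4-7 enabled,
  * while 0x7 enables bits 0-2 only in SF mode.)
  */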
6853
94a78b79 6854 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6855 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6856 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6857 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6858 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6859
94a78b79 6860 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6861
6862 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6863
6864 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6865 /* 0x2 disable e1hov, 0x1 enable */
6866 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6867 (IS_E1HMF(bp) ? 0x1 : 0x2));
6868
1c06328c
EG
6869 {
6870 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6871 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6872 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6873 }
34f80b04
EG
6874 }
6875
94a78b79 6876 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6877 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6878
35b19ba5 6879 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6880 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6881 {
6882 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6883
6884 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6885 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6886
6887 /* The GPIO should be swapped if the swap register is
6888 set and active */
6889 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6890 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6891
6892 /* Select function upon port-swap configuration */
6893 if (port == 0) {
6894 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6895 aeu_gpio_mask = (swap_val && swap_override) ?
6896 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6897 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6898 } else {
6899 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6900 aeu_gpio_mask = (swap_val && swap_override) ?
6901 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6902 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6903 }
6904 val = REG_RD(bp, offset);
6905 /* add GPIO3 to group */
6906 val |= aeu_gpio_mask;
6907 REG_WR(bp, offset, val);
6908 }
6909 break;
6910
35b19ba5 6911 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6912 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6913 /* add SPIO 5 to group 0 */
4d295db0
EG
6914 {
6915 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6916 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6917 val = REG_RD(bp, reg_addr);
f1410647 6918 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6919 REG_WR(bp, reg_addr, val);
6920 }
f1410647
ET
6921 break;
6922
6923 default:
6924 break;
6925 }
6926
c18487ee 6927 bnx2x__link_reset(bp);
a2fbb9ea 6928
34f80b04
EG
6929 return 0;
6930}
6931
6932#define ILT_PER_FUNC (768/2)
6933#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6934/* the phys address is shifted right 12 bits and has a 1=valid bit
6935 added as the 53rd bit;
6936 then, since this is a wide register(TM),
6937 we split it into two 32-bit writes
6938 */
6939#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6940#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6941#define PXP_ONE_ILT(x) (((x) << 10) | x)
6942#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
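/* (Illustrative expansion - editor's note, with a made-up address: for
 * x = 0x0000001234567000, ONCHIP_ADDR1(x) = 0x01234567 (address bits
 * 12..43) and ONCHIP_ADDR2(x) = 0x00100000 (valid bit set, address
 * bits 44..63 zero); PXP_ONE_ILT(5) = 0x1405 and
 * PXP_ILT_RANGE(8, 12) = 0x3008.)
 */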
6943
37b091ba
MC
6944#ifdef BCM_CNIC
6945#define CNIC_ILT_LINES 127
6946#define CNIC_CTX_PER_ILT 16
6947#else
34f80b04 6948#define CNIC_ILT_LINES 0
37b091ba 6949#endif
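/* (Editor's note: with CNIC_CTX_PER_ILT = 16 and the 1024-byte
 * cdu_context assumed in bnx2x_init_common(), one ILT line would map
 * 16 * 1024 = 16K of context memory - an inference, not verified
 * against the ILT page size here.)
 */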
34f80b04
EG
6950
6951static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6952{
6953 int reg;
6954
6955 if (CHIP_IS_E1H(bp))
6956 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6957 else /* E1 */
6958 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6959
6960 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6961}
6962
6963static int bnx2x_init_func(struct bnx2x *bp)
6964{
6965 int port = BP_PORT(bp);
6966 int func = BP_FUNC(bp);
8badd27a 6967 u32 addr, val;
34f80b04
EG
6968 int i;
6969
cdaa7cb8 6970 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 6971
8badd27a
EG
6972 /* set MSI reconfigure capability */
6973 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6974 val = REG_RD(bp, addr);
6975 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6976 REG_WR(bp, addr, val);
6977
34f80b04
EG
6978 i = FUNC_ILT_BASE(func);
6979
6980 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6981 if (CHIP_IS_E1H(bp)) {
6982 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6983 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6984 } else /* E1 */
6985 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6986 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6987
37b091ba
MC
6988#ifdef BCM_CNIC
6989 i += 1 + CNIC_ILT_LINES;
6990 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6991 if (CHIP_IS_E1(bp))
6992 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6993 else {
6994 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6995 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6996 }
6997
6998 i++;
6999 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7000 if (CHIP_IS_E1(bp))
7001 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7002 else {
7003 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7004 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7005 }
7006
7007 i++;
7008 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7009 if (CHIP_IS_E1(bp))
7010 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7011 else {
7012 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7013 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7014 }
7015
7016 /* tell the searcher where the T2 table is */
7017 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7018
7019 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7020 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7021
7022 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7023 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7024 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7025
7026 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7027#endif
34f80b04
EG
7028
7029 if (CHIP_IS_E1H(bp)) {
573f2035
EG
7030 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7031 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7032 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7033 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7034 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7035 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7036 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7037 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7038 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
7039
7040 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7041 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7042 }
7043
7044 /* HC init per function */
7045 if (CHIP_IS_E1H(bp)) {
7046 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7047
7048 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7049 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7050 }
94a78b79 7051 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 7052
c14423fe 7053 /* Reset PCIE errors for debug */
a2fbb9ea
ET
7054 REG_WR(bp, 0x2114, 0xffffffff);
7055 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 7056
34f80b04
EG
7057 return 0;
7058}
7059
7060static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7061{
7062 int i, rc = 0;
a2fbb9ea 7063
34f80b04
EG
7064 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7065 BP_FUNC(bp), load_code);
a2fbb9ea 7066
34f80b04
EG
7067 bp->dmae_ready = 0;
7068 mutex_init(&bp->dmae_mutex);
54016b26
EG
7069 rc = bnx2x_gunzip_init(bp);
7070 if (rc)
7071 return rc;
a2fbb9ea 7072
34f80b04
EG
7073 switch (load_code) {
7074 case FW_MSG_CODE_DRV_LOAD_COMMON:
7075 rc = bnx2x_init_common(bp);
7076 if (rc)
7077 goto init_hw_err;
7078 /* no break */
7079
7080 case FW_MSG_CODE_DRV_LOAD_PORT:
7081 bp->dmae_ready = 1;
7082 rc = bnx2x_init_port(bp);
7083 if (rc)
7084 goto init_hw_err;
7085 /* no break */
7086
7087 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7088 bp->dmae_ready = 1;
7089 rc = bnx2x_init_func(bp);
7090 if (rc)
7091 goto init_hw_err;
7092 break;
7093
7094 default:
7095 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7096 break;
7097 }
7098
7099 if (!BP_NOMCP(bp)) {
7100 int func = BP_FUNC(bp);
a2fbb9ea
ET
7101
7102 bp->fw_drv_pulse_wr_seq =
34f80b04 7103 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 7104 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
7105 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7106 }
a2fbb9ea 7107
34f80b04
EG
7108 /* this needs to be done before gunzip end */
7109 bnx2x_zero_def_sb(bp);
7110 for_each_queue(bp, i)
7111 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
7112#ifdef BCM_CNIC
7113 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7114#endif
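 /* (Editor's note: the CNIC call above appears to rely on i retaining
  * its post-loop value, i.e. the number of queues, so BP_L_ID(bp) + i
  * addresses the status block right after the fastpath ones.)
  */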
34f80b04
EG
7115
7116init_hw_err:
7117 bnx2x_gunzip_end(bp);
7118
7119 return rc;
a2fbb9ea
ET
7120}
7121
a2fbb9ea
ET
7122static void bnx2x_free_mem(struct bnx2x *bp)
7123{
7124
7125#define BNX2X_PCI_FREE(x, y, size) \
7126 do { \
7127 if (x) { \
1a983142 7128 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
7129 x = NULL; \
7130 y = 0; \
7131 } \
7132 } while (0)
7133
7134#define BNX2X_FREE(x) \
7135 do { \
7136 if (x) { \
7137 vfree(x); \
7138 x = NULL; \
7139 } \
7140 } while (0)
7141
7142 int i;
7143
7144 /* fastpath */
555f6c78 7145 /* Common */
a2fbb9ea
ET
7146 for_each_queue(bp, i) {
7147
555f6c78 7148 /* status blocks */
a2fbb9ea
ET
7149 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7150 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7151 sizeof(struct host_status_block));
555f6c78
EG
7152 }
7153 /* Rx */
54b9ddaa 7154 for_each_queue(bp, i) {
a2fbb9ea 7155
555f6c78 7156 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7157 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7158 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7159 bnx2x_fp(bp, i, rx_desc_mapping),
7160 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7161
7162 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7163 bnx2x_fp(bp, i, rx_comp_mapping),
7164 sizeof(struct eth_fast_path_rx_cqe) *
7165 NUM_RCQ_BD);
a2fbb9ea 7166
7a9b2557 7167 /* SGE ring */
32626230 7168 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
7169 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7170 bnx2x_fp(bp, i, rx_sge_mapping),
7171 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7172 }
555f6c78 7173 /* Tx */
54b9ddaa 7174 for_each_queue(bp, i) {
555f6c78
EG
7175
7176 /* fastpath tx rings: tx_buf tx_desc */
7177 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7178 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7179 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7180 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7181 }
a2fbb9ea
ET
7182 /* end of fastpath */
7183
7184 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 7185 sizeof(struct host_def_status_block));
a2fbb9ea
ET
7186
7187 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 7188 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7189
37b091ba 7190#ifdef BCM_CNIC
a2fbb9ea
ET
7191 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7192 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7193 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7194 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
7195 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7196 sizeof(struct host_status_block));
a2fbb9ea 7197#endif
7a9b2557 7198 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
7199
7200#undef BNX2X_PCI_FREE
7201#undef BNX2X_FREE
7202}
7203
7204static int bnx2x_alloc_mem(struct bnx2x *bp)
7205{
7206
7207#define BNX2X_PCI_ALLOC(x, y, size) \
7208 do { \
1a983142 7209 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
a2fbb9ea
ET
7210 if (x == NULL) \
7211 goto alloc_mem_err; \
7212 memset(x, 0, size); \
7213 } while (0)
7214
7215#define BNX2X_ALLOC(x, size) \
7216 do { \
7217 x = vmalloc(size); \
7218 if (x == NULL) \
7219 goto alloc_mem_err; \
7220 memset(x, 0, size); \
7221 } while (0)
7222
7223 int i;
7224
7225 /* fastpath */
555f6c78 7226 /* Common */
a2fbb9ea
ET
7227 for_each_queue(bp, i) {
7228 bnx2x_fp(bp, i, bp) = bp;
7229
555f6c78 7230 /* status blocks */
a2fbb9ea
ET
7231 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7232 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7233 sizeof(struct host_status_block));
555f6c78
EG
7234 }
7235 /* Rx */
54b9ddaa 7236 for_each_queue(bp, i) {
a2fbb9ea 7237
555f6c78 7238 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7239 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7240 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7241 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7242 &bnx2x_fp(bp, i, rx_desc_mapping),
7243 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7244
7245 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7246 &bnx2x_fp(bp, i, rx_comp_mapping),
7247 sizeof(struct eth_fast_path_rx_cqe) *
7248 NUM_RCQ_BD);
7249
7a9b2557
VZ
7250 /* SGE ring */
7251 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7252 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7253 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7254 &bnx2x_fp(bp, i, rx_sge_mapping),
7255 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 7256 }
555f6c78 7257 /* Tx */
54b9ddaa 7258 for_each_queue(bp, i) {
555f6c78 7259
555f6c78
EG
7260 /* fastpath tx rings: tx_buf tx_desc */
7261 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7262 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7263 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7264 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7265 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7266 }
a2fbb9ea
ET
7267 /* end of fastpath */
7268
7269 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7270 sizeof(struct host_def_status_block));
7271
7272 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7273 sizeof(struct bnx2x_slowpath));
7274
37b091ba 7275#ifdef BCM_CNIC
a2fbb9ea
ET
7276 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7277
a2fbb9ea
ET
7278 /* allocate searcher T2 table
7279 we allocate 1/4 of the allocated connection count for T2
7280 (which is not entered into the ILT) */
7281 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7282
37b091ba 7283 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 7284 for (i = 0; i < 16*1024; i += 64)
37b091ba 7285 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
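 /* (Editor's note on the loop above: each 64-byte T2 entry seems to
  * carry, at offset 56, the DMA address of the following entry, linking
  * the 16K/64 = 256 free entries - 1/4 of the 1024 connections - into a
  * chain; bnx2x_init_func() then hands the chain bounds to the searcher
  * via SRC_REG_FIRSTFREE0/SRC_REG_LASTFREE0.)
  */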
a2fbb9ea 7286
37b091ba 7287 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
7288 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7289
7290 /* QM queues (128*MAX_CONN) */
7291 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
7292
7293 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7294 sizeof(struct host_status_block));
a2fbb9ea
ET
7295#endif
7296
7297 /* Slow path ring */
7298 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7299
7300 return 0;
7301
7302alloc_mem_err:
7303 bnx2x_free_mem(bp);
7304 return -ENOMEM;
7305
7306#undef BNX2X_PCI_ALLOC
7307#undef BNX2X_ALLOC
7308}
7309
7310static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7311{
7312 int i;
7313
54b9ddaa 7314 for_each_queue(bp, i) {
a2fbb9ea
ET
7315 struct bnx2x_fastpath *fp = &bp->fp[i];
7316
7317 u16 bd_cons = fp->tx_bd_cons;
7318 u16 sw_prod = fp->tx_pkt_prod;
7319 u16 sw_cons = fp->tx_pkt_cons;
7320
a2fbb9ea
ET
7321 while (sw_cons != sw_prod) {
7322 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7323 sw_cons++;
7324 }
7325 }
7326}
7327
7328static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7329{
7330 int i, j;
7331
54b9ddaa 7332 for_each_queue(bp, j) {
a2fbb9ea
ET
7333 struct bnx2x_fastpath *fp = &bp->fp[j];
7334
a2fbb9ea
ET
7335 for (i = 0; i < NUM_RX_BD; i++) {
7336 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7337 struct sk_buff *skb = rx_buf->skb;
7338
7339 if (skb == NULL)
7340 continue;
7341
1a983142
FT
7342 dma_unmap_single(&bp->pdev->dev,
7343 dma_unmap_addr(rx_buf, mapping),
7344 bp->rx_buf_size, DMA_FROM_DEVICE);
a2fbb9ea
ET
7345
7346 rx_buf->skb = NULL;
7347 dev_kfree_skb(skb);
7348 }
7a9b2557 7349 if (!fp->disable_tpa)
32626230
EG
7350 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7351 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 7352 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
7353 }
7354}
7355
7356static void bnx2x_free_skbs(struct bnx2x *bp)
7357{
7358 bnx2x_free_tx_skbs(bp);
7359 bnx2x_free_rx_skbs(bp);
7360}
7361
7362static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7363{
34f80b04 7364 int i, offset = 1;
a2fbb9ea
ET
7365
7366 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 7367 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
7368 bp->msix_table[0].vector);
7369
37b091ba
MC
7370#ifdef BCM_CNIC
7371 offset++;
7372#endif
a2fbb9ea 7373 for_each_queue(bp, i) {
c14423fe 7374 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 7375 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
7376 bnx2x_fp(bp, i, state));
7377
34f80b04 7378 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 7379 }
a2fbb9ea
ET
7380}
7381
6cbe5065 7382static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
a2fbb9ea 7383{
a2fbb9ea 7384 if (bp->flags & USING_MSIX_FLAG) {
6cbe5065
VZ
7385 if (!disable_only)
7386 bnx2x_free_msix_irqs(bp);
a2fbb9ea 7387 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
7388 bp->flags &= ~USING_MSIX_FLAG;
7389
8badd27a 7390 } else if (bp->flags & USING_MSI_FLAG) {
6cbe5065
VZ
7391 if (!disable_only)
7392 free_irq(bp->pdev->irq, bp->dev);
8badd27a
EG
7393 pci_disable_msi(bp->pdev);
7394 bp->flags &= ~USING_MSI_FLAG;
7395
6cbe5065 7396 } else if (!disable_only)
a2fbb9ea
ET
7397 free_irq(bp->pdev->irq, bp->dev);
7398}
7399
7400static int bnx2x_enable_msix(struct bnx2x *bp)
7401{
8badd27a
EG
7402 int i, rc, offset = 1;
7403 int igu_vec = 0;
a2fbb9ea 7404
8badd27a
EG
7405 bp->msix_table[0].entry = igu_vec;
7406 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 7407
37b091ba
MC
7408#ifdef BCM_CNIC
7409 igu_vec = BP_L_ID(bp) + offset;
7410 bp->msix_table[1].entry = igu_vec;
7411 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7412 offset++;
7413#endif
34f80b04 7414 for_each_queue(bp, i) {
8badd27a 7415 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7416 bp->msix_table[i + offset].entry = igu_vec;
7417 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7418 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7419 }
7420
34f80b04 7421 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7422 BNX2X_NUM_QUEUES(bp) + offset);
1ac218c8
VZ
7423
7424 /*
7425 * reconfigure number of tx/rx queues according to available
7426 * MSI-X vectors
7427 */
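 /* (Editor's note: with the pci_enable_msix() of this era, a positive
  * return value reports how many vectors could have been allocated, so
  * the code below retries with exactly that many and shrinks
  * num_queues accordingly.)
  */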
7428 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7429 /* vectors available for FP */
7430 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7431
7432 DP(NETIF_MSG_IFUP,
7433 "Trying to use less MSI-X vectors: %d\n", rc);
7434
7435 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7436
7437 if (rc) {
7438 DP(NETIF_MSG_IFUP,
7439 "MSI-X is not attainable rc %d\n", rc);
7440 return rc;
7441 }
7442
7443 bp->num_queues = min(bp->num_queues, fp_vec);
7444
7445 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7446 bp->num_queues);
7447 } else if (rc) {
8badd27a
EG
7448 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7449 return rc;
34f80b04 7450 }
8badd27a 7451
a2fbb9ea
ET
7452 bp->flags |= USING_MSIX_FLAG;
7453
7454 return 0;
a2fbb9ea
ET
7455}
7456
a2fbb9ea
ET
7457static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7458{
34f80b04 7459 int i, rc, offset = 1;
a2fbb9ea 7460
a2fbb9ea
ET
7461 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7462 bp->dev->name, bp->dev);
a2fbb9ea
ET
7463 if (rc) {
7464 BNX2X_ERR("request sp irq failed\n");
7465 return -EBUSY;
7466 }
7467
37b091ba
MC
7468#ifdef BCM_CNIC
7469 offset++;
7470#endif
a2fbb9ea 7471 for_each_queue(bp, i) {
555f6c78 7472 struct bnx2x_fastpath *fp = &bp->fp[i];
54b9ddaa
VZ
7473 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7474 bp->dev->name, i);
ca00392c 7475
34f80b04 7476 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7477 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7478 if (rc) {
555f6c78 7479 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7480 bnx2x_free_msix_irqs(bp);
7481 return -EBUSY;
7482 }
7483
555f6c78 7484 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7485 }
7486
555f6c78 7487 i = BNX2X_NUM_QUEUES(bp);
cdaa7cb8
VZ
7488 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7489 " ... fp[%d] %d\n",
7490 bp->msix_table[0].vector,
7491 0, bp->msix_table[offset].vector,
7492 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7493
a2fbb9ea 7494 return 0;
a2fbb9ea
ET
7495}
7496
8badd27a
EG
7497static int bnx2x_enable_msi(struct bnx2x *bp)
7498{
7499 int rc;
7500
7501 rc = pci_enable_msi(bp->pdev);
7502 if (rc) {
7503 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7504 return -1;
7505 }
7506 bp->flags |= USING_MSI_FLAG;
7507
7508 return 0;
7509}
7510
a2fbb9ea
ET
7511static int bnx2x_req_irq(struct bnx2x *bp)
7512{
8badd27a 7513 unsigned long flags;
34f80b04 7514 int rc;
a2fbb9ea 7515
8badd27a
EG
7516 if (bp->flags & USING_MSI_FLAG)
7517 flags = 0;
7518 else
7519 flags = IRQF_SHARED;
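 /* (Editor's note: MSI vectors are exclusive to the device, so no
  * IRQF_SHARED is needed in that case; a legacy INTx line may be
  * shared with other devices, hence the flag otherwise.)
  */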
7520
7521 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7522 bp->dev->name, bp->dev);
a2fbb9ea
ET
7523 if (!rc)
7524 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7525
7526 return rc;
a2fbb9ea
ET
7527}
7528
65abd74d
YG
7529static void bnx2x_napi_enable(struct bnx2x *bp)
7530{
7531 int i;
7532
54b9ddaa 7533 for_each_queue(bp, i)
65abd74d
YG
7534 napi_enable(&bnx2x_fp(bp, i, napi));
7535}
7536
7537static void bnx2x_napi_disable(struct bnx2x *bp)
7538{
7539 int i;
7540
54b9ddaa 7541 for_each_queue(bp, i)
65abd74d
YG
7542 napi_disable(&bnx2x_fp(bp, i, napi));
7543}
7544
7545static void bnx2x_netif_start(struct bnx2x *bp)
7546{
e1510706
EG
7547 int intr_sem;
7548
7549 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7550 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7551
7552 if (intr_sem) {
65abd74d 7553 if (netif_running(bp->dev)) {
65abd74d
YG
7554 bnx2x_napi_enable(bp);
7555 bnx2x_int_enable(bp);
555f6c78
EG
7556 if (bp->state == BNX2X_STATE_OPEN)
7557 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7558 }
7559 }
7560}
7561
f8ef6e44 7562static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7563{
f8ef6e44 7564 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7565 bnx2x_napi_disable(bp);
762d5f6c 7566 netif_tx_disable(bp->dev);
65abd74d
YG
7567}
7568
a2fbb9ea
ET
7569/*
7570 * Init service functions
7571 */
7572
e665bfda
MC
7573/**
7574 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7575 *
7576 * @param bp driver descriptor
7577 * @param set set or clear an entry (1 or 0)
7578 * @param mac pointer to a buffer containing a MAC
7579 * @param cl_bit_vec bit vector of clients to register a MAC for
7580 * @param cam_offset offset in a CAM to use
7581 * @param with_bcast set broadcast MAC as well
7582 */
7583static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7584 u32 cl_bit_vec, u8 cam_offset,
7585 u8 with_bcast)
a2fbb9ea
ET
7586{
7587 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7588 int port = BP_PORT(bp);
a2fbb9ea
ET
7589
7590 /* CAM allocation
7591 * unicasts 0-31:port0 32-63:port1
7592 * multicast 64-127:port0 128-191:port1
7593 */
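 /* (For example - editor's illustration: port 1's primary ETH MAC goes
  * to cam_offset 32, per bnx2x_set_eth_mac_addr_e1() below, and its
  * iSCSI MAC to 32 + 2 = 34, per bnx2x_set_iscsi_eth_mac_addr().)
  */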
e665bfda
MC
7594 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7595 config->hdr.offset = cam_offset;
7596 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7597 config->hdr.reserved1 = 0;
7598
7599 /* primary MAC */
7600 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7601 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7602 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7603 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7604 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7605 swab16(*(u16 *)&mac[4]);
34f80b04 7606 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
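 /* (Worked example - editor's note: on a little-endian host a MAC of
  * 00:11:22:33:44:55 yields msb 0x0011, middle 0x2233, lsb 0x4455,
  * i.e. each swab16() stores its 16-bit chunk big-endian, matching the
  * %04x:%04x:%04x debug format below.)
  */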
3101c2bc
YG
7607 if (set)
7608 config->config_table[0].target_table_entry.flags = 0;
7609 else
7610 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7611 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7612 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7613 config->config_table[0].target_table_entry.vlan_id = 0;
7614
3101c2bc
YG
7615 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7616 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7617 config->config_table[0].cam_entry.msb_mac_addr,
7618 config->config_table[0].cam_entry.middle_mac_addr,
7619 config->config_table[0].cam_entry.lsb_mac_addr);
7620
7621 /* broadcast */
e665bfda
MC
7622 if (with_bcast) {
7623 config->config_table[1].cam_entry.msb_mac_addr =
7624 cpu_to_le16(0xffff);
7625 config->config_table[1].cam_entry.middle_mac_addr =
7626 cpu_to_le16(0xffff);
7627 config->config_table[1].cam_entry.lsb_mac_addr =
7628 cpu_to_le16(0xffff);
7629 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7630 if (set)
7631 config->config_table[1].target_table_entry.flags =
7632 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7633 else
7634 CAM_INVALIDATE(config->config_table[1]);
7635 config->config_table[1].target_table_entry.clients_bit_vector =
7636 cpu_to_le32(cl_bit_vec);
7637 config->config_table[1].target_table_entry.vlan_id = 0;
7638 }
a2fbb9ea
ET
7639
7640 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7641 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7642 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7643}
7644
e665bfda
MC
7645/**
7646 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7647 *
7648 * @param bp driver descriptor
7649 * @param set set or clear an entry (1 or 0)
7650 * @param mac pointer to a buffer containing a MAC
7651 * @param cl_bit_vec bit vector of clients to register a MAC for
7652 * @param cam_offset offset in a CAM to use
7653 */
7654static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7655 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7656{
7657 struct mac_configuration_cmd_e1h *config =
7658 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7659
8d9c5f34 7660 config->hdr.length = 1;
e665bfda
MC
7661 config->hdr.offset = cam_offset;
7662 config->hdr.client_id = 0xff;
34f80b04
EG
7663 config->hdr.reserved1 = 0;
7664
7665 /* primary MAC */
7666 config->config_table[0].msb_mac_addr =
e665bfda 7667 swab16(*(u16 *)&mac[0]);
34f80b04 7668 config->config_table[0].middle_mac_addr =
e665bfda 7669 swab16(*(u16 *)&mac[2]);
34f80b04 7670 config->config_table[0].lsb_mac_addr =
e665bfda 7671 swab16(*(u16 *)&mac[4]);
ca00392c 7672 config->config_table[0].clients_bit_vector =
e665bfda 7673 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7674 config->config_table[0].vlan_id = 0;
7675 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7676 if (set)
7677 config->config_table[0].flags = BP_PORT(bp);
7678 else
7679 config->config_table[0].flags =
7680 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7681
e665bfda 7682 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7683 (set ? "setting" : "clearing"),
34f80b04
EG
7684 config->config_table[0].msb_mac_addr,
7685 config->config_table[0].middle_mac_addr,
e665bfda 7686 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7687
7688 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7689 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7690 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7691}
7692
a2fbb9ea
ET
7693static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7694 int *state_p, int poll)
7695{
7696 /* can take a while if any port is running */
8b3a0f0b 7697 int cnt = 5000;
a2fbb9ea 7698
c14423fe
ET
7699 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7700 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7701
7702 might_sleep();
34f80b04 7703 while (cnt--) {
a2fbb9ea
ET
7704 if (poll) {
7705 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7706 /* if index is different from 0
7707 * the reply for some commands will
3101c2bc 7708 * be on the non default queue
a2fbb9ea
ET
7709 */
7710 if (idx)
7711 bnx2x_rx_int(&bp->fp[idx], 10);
7712 }
a2fbb9ea 7713
3101c2bc 7714 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7715 if (*state_p == state) {
7716#ifdef BNX2X_STOP_ON_ERROR
7717 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7718#endif
a2fbb9ea 7719 return 0;
8b3a0f0b 7720 }
a2fbb9ea 7721
a2fbb9ea 7722 msleep(1);
e3553b29
EG
7723
7724 if (bp->panic)
7725 return -EIO;
a2fbb9ea
ET
7726 }
7727
a2fbb9ea 7728 /* timeout! */
49d66772
ET
7729 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7730 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7731#ifdef BNX2X_STOP_ON_ERROR
7732 bnx2x_panic();
7733#endif
a2fbb9ea 7734
49d66772 7735 return -EBUSY;
a2fbb9ea
ET
7736}
7737
e665bfda
MC
7738static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7739{
7740 bp->set_mac_pending++;
7741 smp_wmb();
7742
7743 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7744 (1 << bp->fp->cl_id), BP_FUNC(bp));
7745
7746 /* Wait for a completion */
7747 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7748}
7749
7750static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7751{
7752 bp->set_mac_pending++;
7753 smp_wmb();
7754
7755 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7756 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7757 1);
7758
7759 /* Wait for a completion */
7760 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7761}
7762
993ac7b5
MC
7763#ifdef BCM_CNIC
7764/**
7765 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7766 * MAC(s). This function will wait until the ramrod completion
7767 * returns.
7768 *
7769 * @param bp driver handle
7770 * @param set set or clear the CAM entry
7771 *
7772 * @return 0 if success, -ENODEV if the ramrod doesn't return.
7773 */
7774static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7775{
7776 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7777
7778 bp->set_mac_pending++;
7779 smp_wmb();
7780
7781 /* Send a SET_MAC ramrod */
7782 if (CHIP_IS_E1(bp))
7783 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7784 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7785 1);
7786 else
7787 /* CAM allocation for E1H
7788 * unicasts: by func number
7789 * multicast: 20+FUNC*20, 20 each
7790 */
7791 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7792 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7793
7794 /* Wait for a completion when setting */
7795 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7796
7797 return 0;
7798}
7799#endif
7800
a2fbb9ea
ET
7801static int bnx2x_setup_leading(struct bnx2x *bp)
7802{
34f80b04 7803 int rc;
a2fbb9ea 7804
c14423fe 7805 /* reset IGU state */
34f80b04 7806 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7807
7808 /* SETUP ramrod */
7809 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7810
34f80b04
EG
7811 /* Wait for completion */
7812 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7813
34f80b04 7814 return rc;
a2fbb9ea
ET
7815}
7816
7817static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7818{
555f6c78
EG
7819 struct bnx2x_fastpath *fp = &bp->fp[index];
7820
a2fbb9ea 7821 /* reset IGU state */
555f6c78 7822 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7823
228241eb 7824 /* SETUP ramrod */
555f6c78
EG
7825 fp->state = BNX2X_FP_STATE_OPENING;
7826 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7827 fp->cl_id, 0);
a2fbb9ea
ET
7828
7829 /* Wait for completion */
7830 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7831 &(fp->state), 0);
a2fbb9ea
ET
7832}
7833
a2fbb9ea 7834static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7835
54b9ddaa 7836static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 7837{
ca00392c
EG
7838
7839 switch (bp->multi_mode) {
7840 case ETH_RSS_MODE_DISABLED:
54b9ddaa 7841 bp->num_queues = 1;
ca00392c
EG
7842 break;
7843
7844 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
7845 if (num_queues)
7846 bp->num_queues = min_t(u32, num_queues,
7847 BNX2X_MAX_QUEUES(bp));
ca00392c 7848 else
54b9ddaa
VZ
7849 bp->num_queues = min_t(u32, num_online_cpus(),
7850 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
7851 break;
7852
7853
7854 default:
54b9ddaa 7855 bp->num_queues = 1;
ca00392c
EG
7856 break;
7857 }
ca00392c
EG
7858}
7859
54b9ddaa 7860static int bnx2x_set_num_queues(struct bnx2x *bp)
a2fbb9ea 7861{
ca00392c 7862 int rc = 0;
a2fbb9ea 7863
8badd27a
EG
7864 switch (int_mode) {
7865 case INT_MODE_INTx:
7866 case INT_MODE_MSI:
54b9ddaa 7867 bp->num_queues = 1;
ca00392c 7868 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a 7869 break;
8badd27a 7870 default:
54b9ddaa
VZ
7871 /* Set number of queues according to bp->multi_mode value */
7872 bnx2x_set_num_queues_msix(bp);
ca00392c 7873
54b9ddaa
VZ
7874 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7875 bp->num_queues);
ca00392c 7876
2dfe0e1f
EG
7877 /* if we can't use MSI-X we only need one fp,
7878 * so try to enable MSI-X with the requested number of fp's
7879 * and fall back to MSI or legacy INTx with one fp
7880 */
ca00392c 7881 rc = bnx2x_enable_msix(bp);
54b9ddaa 7882 if (rc)
34f80b04 7883 /* failed to enable MSI-X */
54b9ddaa 7884 bp->num_queues = 1;
8badd27a 7885 break;
a2fbb9ea 7886 }
54b9ddaa 7887 bp->dev->real_num_tx_queues = bp->num_queues;
ca00392c 7888 return rc;
8badd27a
EG
7889}
7890
993ac7b5
MC
7891#ifdef BCM_CNIC
7892static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7893static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7894#endif
8badd27a
EG
7895
7896/* must be called with rtnl_lock */
7897static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7898{
7899 u32 load_code;
ca00392c
EG
7900 int i, rc;
7901
8badd27a 7902#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7903 if (unlikely(bp->panic))
7904 return -EPERM;
7905#endif
7906
7907 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7908
54b9ddaa 7909 rc = bnx2x_set_num_queues(bp);
c14423fe 7910
6cbe5065
VZ
7911 if (bnx2x_alloc_mem(bp)) {
7912 bnx2x_free_irq(bp, true);
a2fbb9ea 7913 return -ENOMEM;
6cbe5065 7914 }
a2fbb9ea 7915
54b9ddaa 7916 for_each_queue(bp, i)
7a9b2557
VZ
7917 bnx2x_fp(bp, i, disable_tpa) =
7918 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7919
54b9ddaa 7920 for_each_queue(bp, i)
2dfe0e1f
EG
7921 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7922 bnx2x_poll, 128);
7923
2dfe0e1f
EG
7924 bnx2x_napi_enable(bp);
7925
34f80b04
EG
7926 if (bp->flags & USING_MSIX_FLAG) {
7927 rc = bnx2x_req_msix_irqs(bp);
7928 if (rc) {
6cbe5065 7929 bnx2x_free_irq(bp, true);
2dfe0e1f 7930 goto load_error1;
34f80b04
EG
7931 }
7932 } else {
ca00392c 7933 /* Fall back to INTx if we failed to enable MSI-X due to lack of
54b9ddaa 7934 memory (in bnx2x_set_num_queues()) */
8badd27a
EG
7935 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7936 bnx2x_enable_msi(bp);
34f80b04
EG
7937 bnx2x_ack_int(bp);
7938 rc = bnx2x_req_irq(bp);
7939 if (rc) {
2dfe0e1f 7940 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6cbe5065 7941 bnx2x_free_irq(bp, true);
2dfe0e1f 7942 goto load_error1;
a2fbb9ea 7943 }
8badd27a
EG
7944 if (bp->flags & USING_MSI_FLAG) {
7945 bp->dev->irq = bp->pdev->irq;
7995c64e
JP
7946 netdev_info(bp->dev, "using MSI IRQ %d\n",
7947 bp->pdev->irq);
8badd27a 7948 }
a2fbb9ea
ET
7949 }
7950
2dfe0e1f
EG
7951 /* Send LOAD_REQUEST command to MCP.
7952 Returns the type of LOAD command:
7953 if it is the first port to be initialized,
7954 common blocks should be initialized, otherwise - not
7955 */
7956 if (!BP_NOMCP(bp)) {
7957 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7958 if (!load_code) {
7959 BNX2X_ERR("MCP response failure, aborting\n");
7960 rc = -EBUSY;
7961 goto load_error2;
7962 }
7963 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7964 rc = -EBUSY; /* other port in diagnostic mode */
7965 goto load_error2;
7966 }
7967
7968 } else {
7969 int port = BP_PORT(bp);
7970
f5372251 7971 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7972 load_count[0], load_count[1], load_count[2]);
7973 load_count[0]++;
7974 load_count[1 + port]++;
f5372251 7975 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7976 load_count[0], load_count[1], load_count[2]);
7977 if (load_count[0] == 1)
7978 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7979 else if (load_count[1 + port] == 1)
7980 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7981 else
7982 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7983 }
7984
7985 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7986 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7987 bp->port.pmf = 1;
7988 else
7989 bp->port.pmf = 0;
7990 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7991
a2fbb9ea 7992 /* Initialize HW */
34f80b04
EG
7993 rc = bnx2x_init_hw(bp, load_code);
7994 if (rc) {
a2fbb9ea 7995 BNX2X_ERR("HW init failed, aborting\n");
f1e1a199
VZ
7996 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7997 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7998 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
2dfe0e1f 7999 goto load_error2;
a2fbb9ea
ET
8000 }
8001
a2fbb9ea 8002 /* Setup NIC internals and enable interrupts */
471de716 8003 bnx2x_nic_init(bp, load_code);
a2fbb9ea 8004
2691d51d
EG
8005 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8006 (bp->common.shmem2_base))
8007 SHMEM2_WR(bp, dcc_support,
8008 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8009 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8010
a2fbb9ea 8011 /* Send LOAD_DONE command to MCP */
34f80b04 8012 if (!BP_NOMCP(bp)) {
228241eb
ET
8013 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8014 if (!load_code) {
da5a662a 8015 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 8016 rc = -EBUSY;
2dfe0e1f 8017 goto load_error3;
a2fbb9ea
ET
8018 }
8019 }
8020
8021 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8022
34f80b04
EG
8023 rc = bnx2x_setup_leading(bp);
8024 if (rc) {
da5a662a 8025 BNX2X_ERR("Setup leading failed!\n");
e3553b29 8026#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 8027 goto load_error3;
e3553b29
EG
8028#else
8029 bp->panic = 1;
8030 return -EBUSY;
8031#endif
34f80b04 8032 }
a2fbb9ea 8033
34f80b04
EG
8034 if (CHIP_IS_E1H(bp))
8035 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 8036 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 8037 bp->flags |= MF_FUNC_DIS;
34f80b04 8038 }
a2fbb9ea 8039
ca00392c 8040 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
8041#ifdef BCM_CNIC
8042 /* Enable Timer scan */
8043 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8044#endif
34f80b04
EG
8045 for_each_nondefault_queue(bp, i) {
8046 rc = bnx2x_setup_multi(bp, i);
8047 if (rc)
37b091ba
MC
8048#ifdef BCM_CNIC
8049 goto load_error4;
8050#else
2dfe0e1f 8051 goto load_error3;
37b091ba 8052#endif
34f80b04 8053 }
a2fbb9ea 8054
ca00392c 8055 if (CHIP_IS_E1(bp))
e665bfda 8056 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 8057 else
e665bfda 8058 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
8059#ifdef BCM_CNIC
8060 /* Set iSCSI L2 MAC */
8061 mutex_lock(&bp->cnic_mutex);
8062 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8063 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8064 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
4a6e47a4
MC
8065 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8066 CNIC_SB_ID(bp));
993ac7b5
MC
8067 }
8068 mutex_unlock(&bp->cnic_mutex);
8069#endif
ca00392c 8070 }
34f80b04
EG
8071
8072 if (bp->port.pmf)
b5bf9068 8073 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
8074
8075 /* Start fast path */
34f80b04
EG
8076 switch (load_mode) {
8077 case LOAD_NORMAL:
ca00392c
EG
8078 if (bp->state == BNX2X_STATE_OPEN) {
8079 /* Tx queues should only be re-enabled */
8080 netif_tx_wake_all_queues(bp->dev);
8081 }
2dfe0e1f 8082 /* Initialize the receive filter. */
34f80b04
EG
8083 bnx2x_set_rx_mode(bp->dev);
8084 break;
8085
8086 case LOAD_OPEN:
555f6c78 8087 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
8088 if (bp->state != BNX2X_STATE_OPEN)
8089 netif_tx_disable(bp->dev);
2dfe0e1f 8090 /* Initialize the receive filter. */
34f80b04 8091 bnx2x_set_rx_mode(bp->dev);
34f80b04 8092 break;
a2fbb9ea 8093
34f80b04 8094 case LOAD_DIAG:
2dfe0e1f 8095 /* Initialize the receive filter. */
a2fbb9ea 8096 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
8097 bp->state = BNX2X_STATE_DIAG;
8098 break;
8099
8100 default:
8101 break;
a2fbb9ea
ET
8102 }
8103
34f80b04
EG
8104 if (!bp->port.pmf)
8105 bnx2x__link_status_update(bp);
8106
a2fbb9ea
ET
8107 /* start the timer */
8108 mod_timer(&bp->timer, jiffies + bp->current_interval);
8109
993ac7b5
MC
8110#ifdef BCM_CNIC
8111 bnx2x_setup_cnic_irq_info(bp);
8112 if (bp->state == BNX2X_STATE_OPEN)
8113 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8114#endif
72fd0718 8115 bnx2x_inc_load_cnt(bp);
34f80b04 8116
a2fbb9ea
ET
8117 return 0;
8118
37b091ba
MC
8119#ifdef BCM_CNIC
8120load_error4:
8121 /* Disable Timer scan */
8122 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8123#endif
2dfe0e1f
EG
8124load_error3:
8125 bnx2x_int_disable_sync(bp, 1);
8126 if (!BP_NOMCP(bp)) {
8127 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8128 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8129 }
8130 bp->port.pmf = 0;
7a9b2557
VZ
8131 /* Free SKBs, SGEs, TPA pool and driver internals */
8132 bnx2x_free_skbs(bp);
54b9ddaa 8133 for_each_queue(bp, i)
3196a88a 8134 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 8135load_error2:
d1014634 8136 /* Release IRQs */
6cbe5065 8137 bnx2x_free_irq(bp, false);
2dfe0e1f
EG
8138load_error1:
8139 bnx2x_napi_disable(bp);
54b9ddaa 8140 for_each_queue(bp, i)
7cde1c8b 8141 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8142 bnx2x_free_mem(bp);
8143
34f80b04 8144 return rc;
a2fbb9ea
ET
8145}
8146
8147static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8148{
555f6c78 8149 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
8150 int rc;
8151
c14423fe 8152 /* halt the connection */
555f6c78
EG
8153 fp->state = BNX2X_FP_STATE_HALTING;
8154 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 8155
34f80b04 8156 /* Wait for completion */
a2fbb9ea 8157 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 8158 &(fp->state), 1);
c14423fe 8159 if (rc) /* timeout */
a2fbb9ea
ET
8160 return rc;
8161
8162 /* delete cfc entry */
8163 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8164
34f80b04
EG
8165 /* Wait for completion */
8166 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 8167 &(fp->state), 1);
34f80b04 8168 return rc;
a2fbb9ea
ET
8169}
8170
da5a662a 8171static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 8172{
4781bfad 8173 __le16 dsb_sp_prod_idx;
c14423fe 8174 /* if the other port is handling traffic,
a2fbb9ea 8175 this can take a lot of time */
34f80b04
EG
8176 int cnt = 500;
8177 int rc;
a2fbb9ea
ET
8178
8179 might_sleep();
8180
8181 /* Send HALT ramrod */
8182 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 8183 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 8184
34f80b04
EG
8185 /* Wait for completion */
8186 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8187 &(bp->fp[0].state), 1);
8188 if (rc) /* timeout */
da5a662a 8189 return rc;
a2fbb9ea 8190
49d66772 8191 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 8192
228241eb 8193 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
8194 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8195
49d66772 8196 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
8197 we are going to reset the chip anyway
8198 so there is not much to do if this times out
8199 */
34f80b04 8200 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
8201 if (!cnt) {
8202 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8203 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8204 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8205#ifdef BNX2X_STOP_ON_ERROR
8206 bnx2x_panic();
8207#endif
36e552ab 8208 rc = -EBUSY;
34f80b04
EG
8209 break;
8210 }
8211 cnt--;
da5a662a 8212 msleep(1);
5650d9d4 8213 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
8214 }
8215 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8216 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
8217
8218 return rc;
a2fbb9ea
ET
8219}
8220
34f80b04
EG
8221static void bnx2x_reset_func(struct bnx2x *bp)
8222{
8223 int port = BP_PORT(bp);
8224 int func = BP_FUNC(bp);
8225 int base, i;
8226
8227 /* Configure IGU */
8228 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8229 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8230
37b091ba
MC
8231#ifdef BCM_CNIC
8232 /* Disable Timer scan */
8233 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8234 /*
8235 * Wait for at least 10ms and up to 2 second for the timers scan to
8236 * complete
8237 */
8238 for (i = 0; i < 200; i++) {
8239 msleep(10);
8240 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8241 break;
8242 }
8243#endif
34f80b04
EG
8244 /* Clear ILT */
8245 base = FUNC_ILT_BASE(func);
8246 for (i = base; i < base + ILT_PER_FUNC; i++)
8247 bnx2x_ilt_wr(bp, i, 0);
8248}
8249
8250static void bnx2x_reset_port(struct bnx2x *bp)
8251{
8252 int port = BP_PORT(bp);
8253 u32 val;
8254
8255 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8256
8257 /* Do not rcv packets to BRB */
8258 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8259 /* Do not direct rcv packets that are not for MCP to the BRB */
8260 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8261 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8262
8263 /* Configure AEU */
8264 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8265
8266 msleep(100);
8267 /* Check for BRB port occupancy */
8268 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8269 if (val)
8270 DP(NETIF_MSG_IFDOWN,
33471629 8271 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
8272
8273 /* TODO: Close Doorbell port? */
8274}
8275
34f80b04
EG
8276static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8277{
8278 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8279 BP_FUNC(bp), reset_code);
8280
8281 switch (reset_code) {
8282 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8283 bnx2x_reset_port(bp);
8284 bnx2x_reset_func(bp);
8285 bnx2x_reset_common(bp);
8286 break;
8287
8288 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8289 bnx2x_reset_port(bp);
8290 bnx2x_reset_func(bp);
8291 break;
8292
8293 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8294 bnx2x_reset_func(bp);
8295 break;
49d66772 8296
34f80b04
EG
8297 default:
8298 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8299 break;
8300 }
8301}
8302
72fd0718 8303static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 8304{
da5a662a 8305 int port = BP_PORT(bp);
a2fbb9ea 8306 u32 reset_code = 0;
da5a662a 8307 int i, cnt, rc;
a2fbb9ea 8308
555f6c78 8309 /* Wait until tx fastpath tasks complete */
54b9ddaa 8310 for_each_queue(bp, i) {
228241eb
ET
8311 struct bnx2x_fastpath *fp = &bp->fp[i];
8312
34f80b04 8313 cnt = 1000;
e8b5fc51 8314 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 8315
7961f791 8316 bnx2x_tx_int(fp);
34f80b04
EG
8317 if (!cnt) {
8318 BNX2X_ERR("timeout waiting for queue[%d]\n",
8319 i);
8320#ifdef BNX2X_STOP_ON_ERROR
8321 bnx2x_panic();
8322 return -EBUSY;
8323#else
8324 break;
8325#endif
8326 }
8327 cnt--;
da5a662a 8328 msleep(1);
34f80b04 8329 }
228241eb 8330 }
da5a662a
VZ
8331 /* Give HW time to discard old tx messages */
8332 msleep(1);
a2fbb9ea 8333
3101c2bc
YG
8334 if (CHIP_IS_E1(bp)) {
8335 struct mac_configuration_cmd *config =
8336 bnx2x_sp(bp, mcast_config);
8337
e665bfda 8338 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 8339
8d9c5f34 8340 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
8341 CAM_INVALIDATE(config->config_table[i]);
8342
8d9c5f34 8343 config->hdr.length = i;
3101c2bc
YG
8344 if (CHIP_REV_IS_SLOW(bp))
8345 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8346 else
8347 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 8348 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
8349 config->hdr.reserved1 = 0;
8350
e665bfda
MC
8351 bp->set_mac_pending++;
8352 smp_wmb();
8353
3101c2bc
YG
8354 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8355 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8356 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8357
8358 } else { /* E1H */
65abd74d
YG
8359 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8360
e665bfda 8361 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
8362
8363 for (i = 0; i < MC_HASH_SIZE; i++)
8364 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
8365
8366 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 8367 }
993ac7b5
MC
8368#ifdef BCM_CNIC
8369 /* Clear iSCSI L2 MAC */
8370 mutex_lock(&bp->cnic_mutex);
8371 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8372 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8373 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8374 }
8375 mutex_unlock(&bp->cnic_mutex);
8376#endif
3101c2bc 8377
65abd74d
YG
8378 if (unload_mode == UNLOAD_NORMAL)
8379 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8380
7d0446c2 8381 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8382 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8383
7d0446c2 8384 else if (bp->wol) {
65abd74d
YG
8385 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8386 u8 *mac_addr = bp->dev->dev_addr;
8387 u32 val;
8388 /* The mac address is written to entries 1-4 to
8389 preserve entry 0 which is used by the PMF */
8390 u8 entry = (BP_E1HVN(bp) + 1)*8;
8391
8392 val = (mac_addr[0] << 8) | mac_addr[1];
8393 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8394
8395 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8396 (mac_addr[4] << 8) | mac_addr[5];
8397 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
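 /* (Editor's note: e.g. 00:11:22:33:44:55 is stored as 0x0011 in the
  * first word and 0x22334455 in the second; each MAC_MATCH entry spans
  * 8 bytes of register space, hence the *8 in the entry offset above.)
  */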
8398
8399 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8400
8401 } else
8402 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8403
34f80b04
EG
8404 /* Close multi and leading connections.
8405 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
8406 for_each_nondefault_queue(bp, i)
8407 if (bnx2x_stop_multi(bp, i))
228241eb 8408 goto unload_error;
a2fbb9ea 8409
da5a662a
VZ
8410 rc = bnx2x_stop_leading(bp);
8411 if (rc) {
34f80b04 8412 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8413#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8414 return;
da5a662a
VZ
8415#else
8416 goto unload_error;
34f80b04 8417#endif
228241eb
ET
8418 }
8419
8420unload_error:
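 /* With no MCP, the driver mirrors the MCP's bookkeeping itself:
 * load_count[0] counts functions on the whole ASIC and
 * load_count[1 + port] counts functions per port; the last one
 * down is answered with UNLOAD_COMMON (or UNLOAD_PORT). */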
34f80b04 8421 if (!BP_NOMCP(bp))
228241eb 8422 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8423 else {
f5372251 8424 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8425 load_count[0], load_count[1], load_count[2]);
8426 load_count[0]--;
da5a662a 8427 load_count[1 + port]--;
f5372251 8428 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8429 load_count[0], load_count[1], load_count[2]);
8430 if (load_count[0] == 0)
8431 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8432 else if (load_count[1 + port] == 0)
34f80b04
EG
8433 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8434 else
8435 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8436 }
a2fbb9ea 8437
34f80b04
EG
8438 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8439 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8440 bnx2x__link_reset(bp);
a2fbb9ea
ET
8441
8442 /* Reset the chip */
228241eb 8443 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8444
8445 /* Report UNLOAD_DONE to MCP */
34f80b04 8446 if (!BP_NOMCP(bp))
a2fbb9ea 8447 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8448
72fd0718
VZ
8449}
8450
8451static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8452{
8453 u32 val;
8454
8455 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8456
8457 if (CHIP_IS_E1(bp)) {
8458 int port = BP_PORT(bp);
8459 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8460 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8461
8462 val = REG_RD(bp, addr);
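 /* Bits 8-9 presumably mask the E1 "close the gates" attentions,
 * mirroring the PXP/NIG close masks cleared for E1H below
 * (assumption; the constant is undocumented here). */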
8463 val &= ~(0x300);
8464 REG_WR(bp, addr, val);
8465 } else if (CHIP_IS_E1H(bp)) {
8466 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8467 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8468 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8469 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8470 }
8471}
8472
8473/* must be called with rtnl_lock */
8474static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8475{
8476 int i;
8477
8478 if (bp->state == BNX2X_STATE_CLOSED) {
8479 /* Interface has been removed - nothing to recover */
8480 bp->recovery_state = BNX2X_RECOVERY_DONE;
8481 bp->is_leader = 0;
8482 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8483 smp_wmb();
8484
8485 return -EINVAL;
8486 }
8487
8488#ifdef BCM_CNIC
8489 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8490#endif
8491 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8492
8493 /* Set "drop all" */
8494 bp->rx_mode = BNX2X_RX_MODE_NONE;
8495 bnx2x_set_storm_rx_mode(bp);
8496
8497 /* Disable HW interrupts, NAPI and Tx */
8498 bnx2x_netif_stop(bp, 1);
8499
8500 del_timer_sync(&bp->timer);
8501 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8502 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8503 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8504
8505 /* Release IRQs */
8506 bnx2x_free_irq(bp, false);
8507
8508 /* Cleanup the chip if needed */
8509 if (unload_mode != UNLOAD_RECOVERY)
8510 bnx2x_chip_cleanup(bp, unload_mode);
8511
9a035440 8512 bp->port.pmf = 0;
a2fbb9ea 8513
7a9b2557 8514 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8515 bnx2x_free_skbs(bp);
54b9ddaa 8516 for_each_queue(bp, i)
3196a88a 8517 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8518 for_each_queue(bp, i)
7cde1c8b 8519 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8520 bnx2x_free_mem(bp);
8521
8522 bp->state = BNX2X_STATE_CLOSED;
228241eb 8523
a2fbb9ea
ET
8524 netif_carrier_off(bp->dev);
8525
72fd0718
VZ
8526 /* The last driver out must disable "close the gate" if there is no
8527 * parity attention or "process kill" pending.
8528 */
8529 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8530 bnx2x_reset_is_done(bp))
8531 bnx2x_disable_close_the_gate(bp);
8532
8533 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8534 if (unload_mode == UNLOAD_RECOVERY)
8535 bp->fw_seq = 0;
8536
8537 return 0;
8538}
8539
8540/* Close gates #2, #3 and #4: */
8541static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8542{
8543 u32 val, addr;
8544
8545 /* Gates #2 and #4a are closed/opened for "not E1" only */
8546 if (!CHIP_IS_E1(bp)) {
8547 /* #4 */
8548 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8549 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8550 close ? (val | 0x1) : (val & (~(u32)1)));
8551 /* #2 */
8552 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8553 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8554 close ? (val | 0x1) : (val & (~(u32)1)));
8555 }
8556
8557 /* #3 */
8558 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8559 val = REG_RD(bp, addr);
8560 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8561
8562 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8563 close ? "closing" : "opening");
8564 mmiowb();
8565}
8566
8567#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8568
8569static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8570{
8571 /* Do some magic... */
8572 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8573 *magic_val = val & SHARED_MF_CLP_MAGIC;
8574 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8575}
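
/* The prep/done pair below brackets an MCP reset: the `magic' bit is
 * presumably a flag telling the MCP to skip CLP re-initialization, and
 * its original value is restored once the reset completes (assumption
 * from the call sites). */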
8576
8577/* Restore the value of the `magic' bit.
8578 *
8579 * @param bp Driver handle.
8580 * @param magic_val Old value of the `magic' bit.
8581 */
8582static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8583{
8584 /* Restore the `magic' bit value... */
8585 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8586 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8587 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8588 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8589 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8590 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8591}
8592
8593/* Prepares for MCP reset: takes care of CLP configurations.
8594 *
8595 * @param bp Driver handle.
8596 * @param magic_val Old value of 'magic' bit.
8597 */
8598static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8599{
8600 u32 shmem;
8601 u32 validity_offset;
8602
8603 DP(NETIF_MSG_HW, "Starting\n");
8604
8605 /* Set `magic' bit in order to save MF config */
8606 if (!CHIP_IS_E1(bp))
8607 bnx2x_clp_reset_prep(bp, magic_val);
8608
8609 /* Get shmem offset */
8610 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8611 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8612
8613 /* Clear validity map flags */
8614 if (shmem > 0)
8615 REG_WR(bp, shmem + validity_offset, 0);
8616}
8617
8618#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8619#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8620
8621/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8622 * depending on the HW type.
8623 *
8624 * @param bp Driver handle.
8625 */
8626static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8627{
8628 /* special handling for emulation and FPGA,
8629 wait 10 times longer */
8630 if (CHIP_REV_IS_SLOW(bp))
8631 msleep(MCP_ONE_TIMEOUT*10);
8632 else
8633 msleep(MCP_ONE_TIMEOUT);
8634}
8635
8636static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8637{
8638 u32 shmem, cnt, validity_offset, val;
8639 int rc = 0;
8640
8641 msleep(100);
8642
8643 /* Get shmem offset */
8644 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8645 if (shmem == 0) {
8646 BNX2X_ERR("Shmem 0 return failure\n");
8647 rc = -ENOTTY;
8648 goto exit_lbl;
8649 }
8650
8651 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8652
8653 /* Wait for MCP to come up */
8654 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8655 /* TBD: it's best to check the validity map of the last port;
8656 * currently this checks port 0.
8657 */
8658 val = REG_RD(bp, shmem + validity_offset);
8659 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8660 shmem + validity_offset, val);
8661
8662 /* check that shared memory is valid. */
8663 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8664 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8665 break;
8666
8667 bnx2x_mcp_wait_one(bp);
8668 }
8669
8670 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8671
8672 /* Check that shared memory is valid. This indicates that MCP is up. */
8673 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8674 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8675 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8676 rc = -ENOTTY;
8677 goto exit_lbl;
8678 }
8679
8680exit_lbl:
8681 /* Restore the `magic' bit value */
8682 if (!CHIP_IS_E1(bp))
8683 bnx2x_clp_reset_done(bp, magic_val);
8684
8685 return rc;
8686}
8687
8688static void bnx2x_pxp_prep(struct bnx2x *bp)
8689{
8690 if (!CHIP_IS_E1(bp)) {
8691 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8692 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8693 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8694 mmiowb();
8695 }
8696}
8697
8698/*
8699 * Reset the whole chip except for:
8700 * - PCIE core
8701 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8702 * one reset bit)
8703 * - IGU
8704 * - MISC (including AEU)
8705 * - GRC
8706 * - RBCN, RBCP
8707 */
8708static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8709{
8710 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8711
8712 not_reset_mask1 =
8713 MISC_REGISTERS_RESET_REG_1_RST_HC |
8714 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8715 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8716
8717 not_reset_mask2 =
8718 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8719 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8720 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8721 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8722 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8723 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8724 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8725 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8726
8727 reset_mask1 = 0xffffffff;
8728
8729 if (CHIP_IS_E1(bp))
8730 reset_mask2 = 0xffff;
8731 else
8732 reset_mask2 = 0x1ffff;
8733
8734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8735 reset_mask1 & (~not_reset_mask1));
8736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8737 reset_mask2 & (~not_reset_mask2));
8738
8739 barrier();
8740 mmiowb();
8741
8742 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8743 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8744 mmiowb();
8745}
8746
8747static int bnx2x_process_kill(struct bnx2x *bp)
8748{
8749 int cnt = 1000;
8750 u32 val = 0;
8751 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8752
8753
8754 /* Empty the Tetris buffer, wait for 1s */
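 /* Poll up to ~1s (1000 x 1ms): the SR/BLK counters and port-idle
 * bits below are read until they reach the HW's idle values;
 * 0xffffffff in PGL_EXP_ROM2 presumably means no pending expansion
 * ROM reads (assumption). */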
8755 do {
8756 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8757 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8758 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8759 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8760 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8761 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8762 ((port_is_idle_0 & 0x1) == 0x1) &&
8763 ((port_is_idle_1 & 0x1) == 0x1) &&
8764 (pgl_exp_rom2 == 0xffffffff))
8765 break;
8766 msleep(1);
8767 } while (cnt-- > 0);
8768
8769 if (cnt <= 0) {
8770 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there are"
8771 " still outstanding read requests after 1s!\n");
8773 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8774 " port_is_idle_0=0x%08x,"
8775 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8776 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8777 pgl_exp_rom2);
8778 return -EAGAIN;
8779 }
8780
8781 barrier();
8782
8783 /* Close gates #2, #3 and #4 */
8784 bnx2x_set_234_gates(bp, true);
8785
8786 /* TBD: Indicate that "process kill" is in progress to MCP */
8787
8788 /* Clear "unprepared" bit */
8789 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8790 barrier();
8791
8792 /* Make sure all is written to the chip before the reset */
8793 mmiowb();
8794
8795 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8796 * PSWHST, GRC and PSWRD Tetris buffer.
8797 */
8798 msleep(1);
8799
8800 /* Prepare for chip reset: */
8801 /* MCP */
8802 bnx2x_reset_mcp_prep(bp, &val);
8803
8804 /* PXP */
8805 bnx2x_pxp_prep(bp);
8806 barrier();
8807
8808 /* reset the chip */
8809 bnx2x_process_kill_chip_reset(bp);
8810 barrier();
8811
8812 /* Recover after reset: */
8813 /* MCP */
8814 if (bnx2x_reset_mcp_comp(bp, val))
8815 return -EAGAIN;
8816
8817 /* PXP */
8818 bnx2x_pxp_prep(bp);
8819
8820 /* Open the gates #2, #3 and #4 */
8821 bnx2x_set_234_gates(bp, false);
8822
8823 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
8824 * reset state and re-enable attentions. */
8825
a2fbb9ea
ET
8826 return 0;
8827}
8828
72fd0718
VZ
8829static int bnx2x_leader_reset(struct bnx2x *bp)
8830{
8831 int rc = 0;
8832 /* Try to recover after the failure */
8833 if (bnx2x_process_kill(bp)) {
8834 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8835 bp->dev->name);
8836 rc = -EAGAIN;
8837 goto exit_leader_reset;
8838 }
8839
8840 /* Clear "reset is in progress" bit and update the driver state */
8841 bnx2x_set_reset_done(bp);
8842 bp->recovery_state = BNX2X_RECOVERY_DONE;
8843
8844exit_leader_reset:
8845 bp->is_leader = 0;
8846 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8847 smp_wmb();
8848 return rc;
8849}
8850
8851static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8852
8853/* Assumption: runs under rtnl lock. This together with the fact
8854 * that it's called only from bnx2x_reset_task() ensure that it
8855 * will never be called when netif_running(bp->dev) is false.
8856 */
8857static void bnx2x_parity_recover(struct bnx2x *bp)
8858{
8859 DP(NETIF_MSG_HW, "Handling parity\n");
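 /* State machine sketch: every function enters INIT, races for the
 * leader lock and unloads itself; non-leaders then wait in WAIT
 * while the leader performs the "process kill" once all load
 * counts drop, after which everyone reloads and moves to DONE. */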
8860 while (1) {
8861 switch (bp->recovery_state) {
8862 case BNX2X_RECOVERY_INIT:
8863 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8864 /* Try to get a LEADER_LOCK HW lock */
8865 if (bnx2x_trylock_hw_lock(bp,
8866 HW_LOCK_RESOURCE_RESERVED_08))
8867 bp->is_leader = 1;
8868
8869 /* Stop the driver */
8870 /* If interface has been removed - break */
8871 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8872 return;
8873
8874 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8875 /* Ensure "is_leader" and "recovery_state"
8876 * update values are seen on other CPUs
8877 */
8878 smp_wmb();
8879 break;
8880
8881 case BNX2X_RECOVERY_WAIT:
8882 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8883 if (bp->is_leader) {
8884 u32 load_counter = bnx2x_get_load_cnt(bp);
8885 if (load_counter) {
8886 /* Wait until all other functions get
8887 * down.
8888 */
8889 schedule_delayed_work(&bp->reset_task,
8890 HZ/10);
8891 return;
8892 } else {
8893 /* If all other functions got down -
8894 * try to bring the chip back to
8895 * normal. In any case it's an exit
8896 * point for a leader.
8897 */
8898 if (bnx2x_leader_reset(bp) ||
8899 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8900 printk(KERN_ERR "%s: Recovery "
8901 "has failed. Power cycle is "
8902 "needed.\n", bp->dev->name);
8903 /* Disconnect this device */
8904 netif_device_detach(bp->dev);
8905 /* Block ifup for all function
8906 * of this ASIC until
8907 * "process kill" or power
8908 * cycle.
8909 */
8910 bnx2x_set_reset_in_progress(bp);
8911 /* Shut down the power */
8912 bnx2x_set_power_state(bp,
8913 PCI_D3hot);
8914 return;
8915 }
8916
8917 return;
8918 }
8919 } else { /* non-leader */
8920 if (!bnx2x_reset_is_done(bp)) {
8921 /* Try to get the LEADER_LOCK HW lock, since
8922 * a former leader may have been
8923 * unloaded by the user or may have
8924 * released leadership for another
8925 * reason.
8926 */
8927 if (bnx2x_trylock_hw_lock(bp,
8928 HW_LOCK_RESOURCE_RESERVED_08)) {
8929 /* I'm a leader now! Restart a
8930 * switch case.
8931 */
8932 bp->is_leader = 1;
8933 break;
8934 }
8935
8936 schedule_delayed_work(&bp->reset_task,
8937 HZ/10);
8938 return;
8939
8940 } else { /* A leader has completed
8941 * the "process kill". It's an exit
8942 * point for a non-leader.
8943 */
8944 bnx2x_nic_load(bp, LOAD_NORMAL);
8945 bp->recovery_state =
8946 BNX2X_RECOVERY_DONE;
8947 smp_wmb();
8948 return;
8949 }
8950 }
8951 default:
8952 return;
8953 }
8954 }
8955}
8956
8957/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8958 * scheduled on a generic workqueue in order to prevent a deadlock.
8959 */
34f80b04
EG
8960static void bnx2x_reset_task(struct work_struct *work)
8961{
72fd0718 8962 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
8963
8964#ifdef BNX2X_STOP_ON_ERROR
8965 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8966 " so reset not done to allow debug dump,\n"
72fd0718 8967 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
8968 return;
8969#endif
8970
8971 rtnl_lock();
8972
8973 if (!netif_running(bp->dev))
8974 goto reset_task_exit;
8975
72fd0718
VZ
8976 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8977 bnx2x_parity_recover(bp);
8978 else {
8979 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8980 bnx2x_nic_load(bp, LOAD_NORMAL);
8981 }
34f80b04
EG
8982
8983reset_task_exit:
8984 rtnl_unlock();
8985}
8986
a2fbb9ea
ET
8987/* end of nic load/unload */
8988
8989/* ethtool_ops */
8990
8991/*
8992 * Init service functions
8993 */
8994
f1ef27ef
EG
8995static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8996{
8997 switch (func) {
8998 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8999 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9000 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9001 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9002 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9003 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9004 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9005 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9006 default:
9007 BNX2X_ERR("Unsupported function index: %d\n", func);
9008 return (u32)(-1);
9009 }
9010}
9011
9012static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9013{
9014 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9015
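 /* GRC "pretend": writing a function number into this register makes
 * subsequent host GRC accesses behave as that function, so the
 * interrupt disable below takes effect for function 0. */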
9016 /* Flush all outstanding writes */
9017 mmiowb();
9018
9019 /* Pretend to be function 0 */
9020 REG_WR(bp, reg, 0);
9021 /* Flush the GRC transaction (in the chip) */
9022 new_val = REG_RD(bp, reg);
9023 if (new_val != 0) {
9024 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9025 new_val);
9026 BUG();
9027 }
9028
9029 /* From now we are in the "like-E1" mode */
9030 bnx2x_int_disable(bp);
9031
9032 /* Flush all outstanding writes */
9033 mmiowb();
9034
9035 /* Restore the original function settings */
9036 REG_WR(bp, reg, orig_func);
9037 new_val = REG_RD(bp, reg);
9038 if (new_val != orig_func) {
9039 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9040 orig_func, new_val);
9041 BUG();
9042 }
9043}
9044
9045static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9046{
9047 if (CHIP_IS_E1H(bp))
9048 bnx2x_undi_int_disable_e1h(bp, func);
9049 else
9050 bnx2x_int_disable(bp);
9051}
9052
34f80b04
EG
9053static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9054{
9055 u32 val;
9056
9057 /* Check if there is any driver already loaded */
9058 val = REG_RD(bp, MISC_REG_UNPREPARED);
9059 if (val == 0x1) {
9060 /* Check if it is the UNDI driver;
9061 * UNDI initializes the CID offset for the normal doorbell to 0x7
9062 */
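 /* UNDI unload sequence: under the UNDI HW lock, request unload for
 * whichever port(s) the boot driver claimed, block RX traffic into
 * the BRB, reset the device and restore the NIG port-swap straps
 * (summary of the block below). */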
4a37fb66 9063 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9064 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9065 if (val == 0x7) {
9066 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 9067 /* save our func */
34f80b04 9068 int func = BP_FUNC(bp);
da5a662a
VZ
9069 u32 swap_en;
9070 u32 swap_val;
34f80b04 9071
b4661739
EG
9072 /* clear the UNDI indication */
9073 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9074
34f80b04
EG
9075 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9076
9077 /* try unload UNDI on port 0 */
9078 bp->func = 0;
da5a662a
VZ
9079 bp->fw_seq =
9080 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9081 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 9082 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9083
9084 /* if UNDI is loaded on the other port */
9085 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9086
da5a662a
VZ
9087 /* send "DONE" for previous unload */
9088 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9089
9090 /* unload UNDI on port 1 */
34f80b04 9091 bp->func = 1;
da5a662a
VZ
9092 bp->fw_seq =
9093 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9094 DRV_MSG_SEQ_NUMBER_MASK);
9095 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9096
9097 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9098 }
9099
b4661739
EG
9100 /* now it's safe to release the lock */
9101 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9102
f1ef27ef 9103 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
9104
9105 /* close input traffic and wait for it */
9106 /* Do not rcv packets to BRB */
9107 REG_WR(bp,
9108 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9109 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9110 /* Do not direct received packets that are not meant for the
9111 * MCP to the BRB */
9112 REG_WR(bp,
9113 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9114 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9115 /* clear AEU */
9116 REG_WR(bp,
9117 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9118 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9119 msleep(10);
9120
9121 /* save NIG port swap info */
9122 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9123 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
9124 /* reset device */
9125 REG_WR(bp,
9126 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 9127 0xd3ffffff);
34f80b04
EG
9128 REG_WR(bp,
9129 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9130 0x1403);
da5a662a
VZ
9131 /* take the NIG out of reset and restore swap values */
9132 REG_WR(bp,
9133 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9134 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9135 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9136 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9137
9138 /* send unload done to the MCP */
9139 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9140
9141 /* restore our func and fw_seq */
9142 bp->func = func;
9143 bp->fw_seq =
9144 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9145 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
9146
9147 } else
9148 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9149 }
9150}
9151
9152static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9153{
9154 u32 val, val2, val3, val4, id;
72ce58c3 9155 u16 pmc;
34f80b04
EG
9156
9157 /* Get the chip revision id and number. */
9158 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9159 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9160 id = ((val & 0xffff) << 16);
9161 val = REG_RD(bp, MISC_REG_CHIP_REV);
9162 id |= ((val & 0xf) << 12);
9163 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9164 id |= ((val & 0xff) << 4);
5a40e08e 9165 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
9166 id |= (val & 0xf);
9167 bp->common.chip_id = id;
9168 bp->link_params.chip_id = bp->common.chip_id;
9169 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9170
1c06328c
EG
9171 val = (REG_RD(bp, 0x2874) & 0x55);
9172 if ((bp->common.chip_id & 0x1) ||
9173 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9174 bp->flags |= ONE_PORT_FLAG;
9175 BNX2X_DEV_INFO("single port device\n");
9176 }
9177
34f80b04
EG
9178 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9179 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9180 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9181 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9182 bp->common.flash_size, bp->common.flash_size);
9183
9184 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 9185 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 9186 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
9187 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9188 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
9189
9190 if (!bp->common.shmem_base ||
9191 (bp->common.shmem_base < 0xA0000) ||
9192 (bp->common.shmem_base >= 0xC0000)) {
9193 BNX2X_DEV_INFO("MCP not active\n");
9194 bp->flags |= NO_MCP_FLAG;
9195 return;
9196 }
9197
9198 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9199 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9200 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 9201 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
9202
9203 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 9204 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
9205
9206 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9207 SHARED_HW_CFG_LED_MODE_MASK) >>
9208 SHARED_HW_CFG_LED_MODE_SHIFT);
9209
c2c8b03e
EG
9210 bp->link_params.feature_config_flags = 0;
9211 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9212 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9213 bp->link_params.feature_config_flags |=
9214 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9215 else
9216 bp->link_params.feature_config_flags &=
9217 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9218
34f80b04
EG
9219 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9220 bp->common.bc_ver = val;
9221 BNX2X_DEV_INFO("bc_ver %X\n", val);
9222 if (val < BNX2X_BC_VER) {
9223 /* for now only warn;
9224 * later we might need to enforce this */
cdaa7cb8
VZ
9225 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9226 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 9227 }
4d295db0
EG
9228 bp->link_params.feature_config_flags |=
9229 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9230 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
9231
9232 if (BP_E1HVN(bp) == 0) {
9233 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9234 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9235 } else {
9236 /* no WOL capability for E1HVN != 0 */
9237 bp->flags |= NO_WOL_FLAG;
9238 }
9239 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 9240 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
9241
9242 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9243 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9244 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9245 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9246
cdaa7cb8
VZ
9247 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9248 val, val2, val3, val4);
34f80b04
EG
9249}
9250
9251static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9252 u32 switch_cfg)
a2fbb9ea 9253{
34f80b04 9254 int port = BP_PORT(bp);
a2fbb9ea
ET
9255 u32 ext_phy_type;
9256
a2fbb9ea
ET
9257 switch (switch_cfg) {
9258 case SWITCH_CFG_1G:
9259 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9260
c18487ee
YR
9261 ext_phy_type =
9262 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9263 switch (ext_phy_type) {
9264 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9265 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9266 ext_phy_type);
9267
34f80b04
EG
9268 bp->port.supported |= (SUPPORTED_10baseT_Half |
9269 SUPPORTED_10baseT_Full |
9270 SUPPORTED_100baseT_Half |
9271 SUPPORTED_100baseT_Full |
9272 SUPPORTED_1000baseT_Full |
9273 SUPPORTED_2500baseX_Full |
9274 SUPPORTED_TP |
9275 SUPPORTED_FIBRE |
9276 SUPPORTED_Autoneg |
9277 SUPPORTED_Pause |
9278 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9279 break;
9280
9281 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9282 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9283 ext_phy_type);
9284
34f80b04
EG
9285 bp->port.supported |= (SUPPORTED_10baseT_Half |
9286 SUPPORTED_10baseT_Full |
9287 SUPPORTED_100baseT_Half |
9288 SUPPORTED_100baseT_Full |
9289 SUPPORTED_1000baseT_Full |
9290 SUPPORTED_TP |
9291 SUPPORTED_FIBRE |
9292 SUPPORTED_Autoneg |
9293 SUPPORTED_Pause |
9294 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9295 break;
9296
9297 default:
9298 BNX2X_ERR("NVRAM config error. "
9299 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 9300 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9301 return;
9302 }
9303
34f80b04
EG
9304 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9305 port*0x10);
9306 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
9307 break;
9308
9309 case SWITCH_CFG_10G:
9310 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9311
c18487ee
YR
9312 ext_phy_type =
9313 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9314 switch (ext_phy_type) {
9315 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9316 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9317 ext_phy_type);
9318
34f80b04
EG
9319 bp->port.supported |= (SUPPORTED_10baseT_Half |
9320 SUPPORTED_10baseT_Full |
9321 SUPPORTED_100baseT_Half |
9322 SUPPORTED_100baseT_Full |
9323 SUPPORTED_1000baseT_Full |
9324 SUPPORTED_2500baseX_Full |
9325 SUPPORTED_10000baseT_Full |
9326 SUPPORTED_TP |
9327 SUPPORTED_FIBRE |
9328 SUPPORTED_Autoneg |
9329 SUPPORTED_Pause |
9330 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9331 break;
9332
589abe3a
EG
9333 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9334 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 9335 ext_phy_type);
f1410647 9336
34f80b04 9337 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9338 SUPPORTED_1000baseT_Full |
34f80b04 9339 SUPPORTED_FIBRE |
589abe3a 9340 SUPPORTED_Autoneg |
34f80b04
EG
9341 SUPPORTED_Pause |
9342 SUPPORTED_Asym_Pause);
f1410647
ET
9343 break;
9344
589abe3a
EG
9345 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9346 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
9347 ext_phy_type);
9348
34f80b04 9349 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9350 SUPPORTED_2500baseX_Full |
34f80b04 9351 SUPPORTED_1000baseT_Full |
589abe3a
EG
9352 SUPPORTED_FIBRE |
9353 SUPPORTED_Autoneg |
9354 SUPPORTED_Pause |
9355 SUPPORTED_Asym_Pause);
9356 break;
9357
9358 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9359 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9360 ext_phy_type);
9361
9362 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
9363 SUPPORTED_FIBRE |
9364 SUPPORTED_Pause |
9365 SUPPORTED_Asym_Pause);
f1410647
ET
9366 break;
9367
589abe3a
EG
9368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9369 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
9370 ext_phy_type);
9371
34f80b04
EG
9372 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9373 SUPPORTED_1000baseT_Full |
9374 SUPPORTED_FIBRE |
34f80b04
EG
9375 SUPPORTED_Pause |
9376 SUPPORTED_Asym_Pause);
f1410647
ET
9377 break;
9378
589abe3a
EG
9379 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9380 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
9381 ext_phy_type);
9382
34f80b04 9383 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 9384 SUPPORTED_1000baseT_Full |
34f80b04 9385 SUPPORTED_Autoneg |
589abe3a 9386 SUPPORTED_FIBRE |
34f80b04
EG
9387 SUPPORTED_Pause |
9388 SUPPORTED_Asym_Pause);
c18487ee
YR
9389 break;
9390
4d295db0
EG
9391 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9392 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9393 ext_phy_type);
9394
9395 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9396 SUPPORTED_1000baseT_Full |
9397 SUPPORTED_Autoneg |
9398 SUPPORTED_FIBRE |
9399 SUPPORTED_Pause |
9400 SUPPORTED_Asym_Pause);
9401 break;
9402
f1410647
ET
9403 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9404 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9405 ext_phy_type);
9406
34f80b04
EG
9407 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9408 SUPPORTED_TP |
9409 SUPPORTED_Autoneg |
9410 SUPPORTED_Pause |
9411 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9412 break;
9413
28577185
EG
9414 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9415 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9416 ext_phy_type);
9417
9418 bp->port.supported |= (SUPPORTED_10baseT_Half |
9419 SUPPORTED_10baseT_Full |
9420 SUPPORTED_100baseT_Half |
9421 SUPPORTED_100baseT_Full |
9422 SUPPORTED_1000baseT_Full |
9423 SUPPORTED_10000baseT_Full |
9424 SUPPORTED_TP |
9425 SUPPORTED_Autoneg |
9426 SUPPORTED_Pause |
9427 SUPPORTED_Asym_Pause);
9428 break;
9429
c18487ee
YR
9430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9431 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9432 bp->link_params.ext_phy_config);
9433 break;
9434
a2fbb9ea
ET
9435 default:
9436 BNX2X_ERR("NVRAM config error. "
9437 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 9438 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9439 return;
9440 }
9441
34f80b04
EG
9442 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9443 port*0x18);
9444 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 9445
a2fbb9ea
ET
9446 break;
9447
9448 default:
9449 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 9450 bp->port.link_config);
a2fbb9ea
ET
9451 return;
9452 }
34f80b04 9453 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
9454
9455 /* mask what we support according to speed_cap_mask */
c18487ee
YR
9456 if (!(bp->link_params.speed_cap_mask &
9457 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 9458 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 9459
c18487ee
YR
9460 if (!(bp->link_params.speed_cap_mask &
9461 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 9462 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 9463
c18487ee
YR
9464 if (!(bp->link_params.speed_cap_mask &
9465 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 9466 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 9467
c18487ee
YR
9468 if (!(bp->link_params.speed_cap_mask &
9469 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 9470 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 9471
c18487ee
YR
9472 if (!(bp->link_params.speed_cap_mask &
9473 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
9474 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9475 SUPPORTED_1000baseT_Full);
a2fbb9ea 9476
c18487ee
YR
9477 if (!(bp->link_params.speed_cap_mask &
9478 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 9479 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 9480
c18487ee
YR
9481 if (!(bp->link_params.speed_cap_mask &
9482 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 9483 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 9484
34f80b04 9485 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
9486}
9487
34f80b04 9488static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 9489{
c18487ee 9490 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 9491
34f80b04 9492 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 9493 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 9494 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 9495 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9496 bp->port.advertising = bp->port.supported;
a2fbb9ea 9497 } else {
c18487ee
YR
9498 u32 ext_phy_type =
9499 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9500
9501 if ((ext_phy_type ==
9502 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9503 (ext_phy_type ==
9504 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 9505 /* force 10G, no AN */
c18487ee 9506 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 9507 bp->port.advertising =
a2fbb9ea
ET
9508 (ADVERTISED_10000baseT_Full |
9509 ADVERTISED_FIBRE);
9510 break;
9511 }
9512 BNX2X_ERR("NVRAM config error. "
9513 "Invalid link_config 0x%x"
9514 " Autoneg not supported\n",
34f80b04 9515 bp->port.link_config);
a2fbb9ea
ET
9516 return;
9517 }
9518 break;
9519
9520 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 9521 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 9522 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
9523 bp->port.advertising = (ADVERTISED_10baseT_Full |
9524 ADVERTISED_TP);
a2fbb9ea 9525 } else {
cdaa7cb8
VZ
9526 BNX2X_ERROR("NVRAM config error. "
9527 "Invalid link_config 0x%x"
9528 " speed_cap_mask 0x%x\n",
9529 bp->port.link_config,
9530 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9531 return;
9532 }
9533 break;
9534
9535 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 9536 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
9537 bp->link_params.req_line_speed = SPEED_10;
9538 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9539 bp->port.advertising = (ADVERTISED_10baseT_Half |
9540 ADVERTISED_TP);
a2fbb9ea 9541 } else {
cdaa7cb8
VZ
9542 BNX2X_ERROR("NVRAM config error. "
9543 "Invalid link_config 0x%x"
9544 " speed_cap_mask 0x%x\n",
9545 bp->port.link_config,
9546 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9547 return;
9548 }
9549 break;
9550
9551 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 9552 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 9553 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
9554 bp->port.advertising = (ADVERTISED_100baseT_Full |
9555 ADVERTISED_TP);
a2fbb9ea 9556 } else {
cdaa7cb8
VZ
9557 BNX2X_ERROR("NVRAM config error. "
9558 "Invalid link_config 0x%x"
9559 " speed_cap_mask 0x%x\n",
9560 bp->port.link_config,
9561 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9562 return;
9563 }
9564 break;
9565
9566 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 9567 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
9568 bp->link_params.req_line_speed = SPEED_100;
9569 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9570 bp->port.advertising = (ADVERTISED_100baseT_Half |
9571 ADVERTISED_TP);
a2fbb9ea 9572 } else {
cdaa7cb8
VZ
9573 BNX2X_ERROR("NVRAM config error. "
9574 "Invalid link_config 0x%x"
9575 " speed_cap_mask 0x%x\n",
9576 bp->port.link_config,
9577 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9578 return;
9579 }
9580 break;
9581
9582 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 9583 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 9584 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
9585 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9586 ADVERTISED_TP);
a2fbb9ea 9587 } else {
cdaa7cb8
VZ
9588 BNX2X_ERROR("NVRAM config error. "
9589 "Invalid link_config 0x%x"
9590 " speed_cap_mask 0x%x\n",
9591 bp->port.link_config,
9592 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9593 return;
9594 }
9595 break;
9596
9597 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 9598 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 9599 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
9600 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9601 ADVERTISED_TP);
a2fbb9ea 9602 } else {
cdaa7cb8
VZ
9603 BNX2X_ERROR("NVRAM config error. "
9604 "Invalid link_config 0x%x"
9605 " speed_cap_mask 0x%x\n",
9606 bp->port.link_config,
9607 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9608 return;
9609 }
9610 break;
9611
9612 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9613 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9614 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 9615 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 9616 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
9617 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9618 ADVERTISED_FIBRE);
a2fbb9ea 9619 } else {
cdaa7cb8
VZ
9620 BNX2X_ERROR("NVRAM config error. "
9621 "Invalid link_config 0x%x"
9622 " speed_cap_mask 0x%x\n",
9623 bp->port.link_config,
9624 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9625 return;
9626 }
9627 break;
9628
9629 default:
cdaa7cb8
VZ
9630 BNX2X_ERROR("NVRAM config error. "
9631 "BAD link speed link_config 0x%x\n",
9632 bp->port.link_config);
c18487ee 9633 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9634 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
9635 break;
9636 }
a2fbb9ea 9637
34f80b04
EG
9638 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9639 PORT_FEATURE_FLOW_CONTROL_MASK);
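 /* Flow-control autoneg requires link autoneg; fall back to no
 * flow control when the PHY cannot autonegotiate. */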
c0700f90 9640 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 9641 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 9642 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9643
c18487ee 9644 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 9645 " advertising 0x%x\n",
c18487ee
YR
9646 bp->link_params.req_line_speed,
9647 bp->link_params.req_duplex,
34f80b04 9648 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
9649}
9650
e665bfda
MC
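/* Pack the shmem's 16-bit upper and 32-bit lower MAC words into a
 * 6-byte address in network byte order. */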
9651static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9652{
9653 mac_hi = cpu_to_be16(mac_hi);
9654 mac_lo = cpu_to_be32(mac_lo);
9655 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9656 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9657}
9658
34f80b04 9659static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 9660{
34f80b04
EG
9661 int port = BP_PORT(bp);
9662 u32 val, val2;
589abe3a 9663 u32 config;
c2c8b03e 9664 u16 i;
01cd4528 9665 u32 ext_phy_type;
a2fbb9ea 9666
c18487ee 9667 bp->link_params.bp = bp;
34f80b04 9668 bp->link_params.port = port;
c18487ee 9669
c18487ee 9670 bp->link_params.lane_config =
a2fbb9ea 9671 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 9672 bp->link_params.ext_phy_config =
a2fbb9ea
ET
9673 SHMEM_RD(bp,
9674 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
9675 /* BCM8727_NOC => BCM8727 with no over-current support */
9676 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9677 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9678 bp->link_params.ext_phy_config &=
9679 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9680 bp->link_params.ext_phy_config |=
9681 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9682 bp->link_params.feature_config_flags |=
9683 FEATURE_CONFIG_BCM8727_NOC;
9684 }
9685
c18487ee 9686 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
9687 SHMEM_RD(bp,
9688 dev_info.port_hw_config[port].speed_capability_mask);
9689
34f80b04 9690 bp->port.link_config =
a2fbb9ea
ET
9691 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9692
c2c8b03e
EG
9693 /* Get the 4 lanes xgxs config rx and tx */
9694 for (i = 0; i < 2; i++) {
9695 val = SHMEM_RD(bp,
9696 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9697 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9698 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9699
9700 val = SHMEM_RD(bp,
9701 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9702 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9703 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9704 }
9705
3ce2c3f9
EG
9706 /* If the device is capable of WoL, set the default state according
9707 * to the HW
9708 */
4d295db0 9709 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
9710 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9711 (config & PORT_FEATURE_WOL_ENABLED));
9712
c2c8b03e
EG
9713 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9714 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
9715 bp->link_params.lane_config,
9716 bp->link_params.ext_phy_config,
34f80b04 9717 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 9718
4d295db0
EG
9719 bp->link_params.switch_cfg |= (bp->port.link_config &
9720 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 9721 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
9722
9723 bnx2x_link_settings_requested(bp);
9724
01cd4528
EG
9725 /*
9726 * If connected directly, work with the internal PHY; otherwise work
9727 * with the external PHY
9728 */
9729 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9730 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9731 bp->mdio.prtad = bp->link_params.phy_addr;
9732
9733 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9734 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9735 bp->mdio.prtad =
659bc5c4 9736 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 9737
a2fbb9ea
ET
9738 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9739 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 9740 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
9741 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9742 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
9743
9744#ifdef BCM_CNIC
9745 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9746 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9747 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9748#endif
34f80b04
EG
9749}
9750
9751static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9752{
9753 int func = BP_FUNC(bp);
9754 u32 val, val2;
9755 int rc = 0;
a2fbb9ea 9756
34f80b04 9757 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 9758
34f80b04
EG
9759 bp->e1hov = 0;
9760 bp->e1hmf = 0;
2145a920 9761 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
9762 bp->mf_config =
9763 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 9764
2691d51d 9765 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 9766 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 9767 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 9768 bp->e1hmf = 1;
2691d51d
EG
9769 BNX2X_DEV_INFO("%s function mode\n",
9770 IS_E1HMF(bp) ? "multi" : "single");
9771
9772 if (IS_E1HMF(bp)) {
9773 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9774 e1hov_tag) &
9775 FUNC_MF_CFG_E1HOV_TAG_MASK);
9776 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9777 bp->e1hov = val;
9778 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9779 "(0x%04x)\n",
9780 func, bp->e1hov, bp->e1hov);
9781 } else {
cdaa7cb8
VZ
9782 BNX2X_ERROR("No valid E1HOV for func %d,"
9783 " aborting\n", func);
34f80b04
EG
9784 rc = -EPERM;
9785 }
2691d51d
EG
9786 } else {
9787 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
9788 BNX2X_ERROR("VN %d in single function mode,"
9789 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
9790 rc = -EPERM;
9791 }
34f80b04
EG
9792 }
9793 }
a2fbb9ea 9794
34f80b04
EG
9795 if (!BP_NOMCP(bp)) {
9796 bnx2x_get_port_hwinfo(bp);
9797
9798 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9799 DRV_MSG_SEQ_NUMBER_MASK);
9800 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9801 }
9802
9803 if (IS_E1HMF(bp)) {
9804 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9805 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9806 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9807 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
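 /* Assemble the MAC from the mf_cfg words: mac_upper holds
 * bytes 0-1, mac_lower bytes 2-5. */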
9808 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9809 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9810 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9811 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9812 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9813 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9814 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9815 ETH_ALEN);
9816 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9817 ETH_ALEN);
a2fbb9ea 9818 }
34f80b04
EG
9819
9820 return rc;
a2fbb9ea
ET
9821 }
9822
34f80b04
EG
9823 if (BP_NOMCP(bp)) {
9824 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 9825 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
9826 random_ether_addr(bp->dev->dev_addr);
9827 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9828 }
a2fbb9ea 9829
34f80b04
EG
9830 return rc;
9831}
9832
34f24c7f
VZ
9833static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9834{
9835 int cnt, i, block_end, rodi;
9836 char vpd_data[BNX2X_VPD_LEN+1];
9837 char str_id_reg[VENDOR_ID_LEN+1];
9838 char str_id_cap[VENDOR_ID_LEN+1];
9839 u8 len;
9840
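 /* Walk the PCI VPD: find the read-only data tag, then the MFR_ID
 * keyword; for Dell-branded boards the VENDOR0 keyword is copied
 * into fw_ver as an extra version tag (reading of the flow below). */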
9841 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9842 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9843
9844 if (cnt < BNX2X_VPD_LEN)
9845 goto out_not_found;
9846
9847 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9848 PCI_VPD_LRDT_RO_DATA);
9849 if (i < 0)
9850 goto out_not_found;
9851
9852
9853 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9854 pci_vpd_lrdt_size(&vpd_data[i]);
9855
9856 i += PCI_VPD_LRDT_TAG_SIZE;
9857
9858 if (block_end > BNX2X_VPD_LEN)
9859 goto out_not_found;
9860
9861 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9862 PCI_VPD_RO_KEYWORD_MFR_ID);
9863 if (rodi < 0)
9864 goto out_not_found;
9865
9866 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9867
9868 if (len != VENDOR_ID_LEN)
9869 goto out_not_found;
9870
9871 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9872
9873 /* vendor specific info */
9874 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9875 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9876 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9877 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9878
9879 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9880 PCI_VPD_RO_KEYWORD_VENDOR0);
9881 if (rodi >= 0) {
9882 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9883
9884 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9885
9886 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9887 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9888 bp->fw_ver[len] = ' ';
9889 }
9890 }
9891 return;
9892 }
9893out_not_found:
9894 return;
9895}
9896
34f80b04
EG
9897static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9898{
9899 int func = BP_FUNC(bp);
87942b46 9900 int timer_interval;
34f80b04
EG
9901 int rc;
9902
da5a662a
VZ
9903 /* Disable interrupt handling until HW is initialized */
9904 atomic_set(&bp->intr_sem, 1);
e1510706 9905 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 9906
34f80b04 9907 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 9908 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
9909#ifdef BCM_CNIC
9910 mutex_init(&bp->cnic_mutex);
9911#endif
a2fbb9ea 9912
1cf167f2 9913 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 9914 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
9915
9916 rc = bnx2x_get_hwinfo(bp);
9917
34f24c7f 9918 bnx2x_read_fwinfo(bp);
34f80b04
EG
9919 /* need to reset chip if undi was active */
9920 if (!BP_NOMCP(bp))
9921 bnx2x_undi_unload(bp);
9922
9923 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 9924 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
9925
9926 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
9927 dev_err(&bp->pdev->dev, "MCP disabled, "
9928 "must load devices in order!\n");
34f80b04 9929
555f6c78 9930 /* Set multi queue mode */
8badd27a
EG
9931 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9932 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
9933 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9934 "requested is not MSI-X\n");
555f6c78
EG
9935 multi_mode = ETH_RSS_MODE_DISABLED;
9936 }
9937 bp->multi_mode = multi_mode;
9938
9939
4fd89b7a
DK
9940 bp->dev->features |= NETIF_F_GRO;
9941
7a9b2557
VZ
9942 /* Set TPA flags */
9943 if (disable_tpa) {
9944 bp->flags &= ~TPA_ENABLE_FLAG;
9945 bp->dev->features &= ~NETIF_F_LRO;
9946 } else {
9947 bp->flags |= TPA_ENABLE_FLAG;
9948 bp->dev->features |= NETIF_F_LRO;
9949 }
9950
a18f5128
EG
9951 if (CHIP_IS_E1(bp))
9952 bp->dropless_fc = 0;
9953 else
9954 bp->dropless_fc = dropless_fc;
9955
8d5726c4 9956 bp->mrrs = mrrs;
7a9b2557 9957
34f80b04
EG
9958 bp->tx_ring_size = MAX_TX_AVAIL;
9959 bp->rx_ring_size = MAX_RX_AVAIL;
9960
9961 bp->rx_csum = 1;
34f80b04 9962
7d323bfd
EG
9963 /* round the default coalescing ticks to the HW granularity (4*BNX2X_BTR) */
9964 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9965 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 9966
87942b46
EG
9967 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9968 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9969
9970 init_timer(&bp->timer);
9971 bp->timer.expires = jiffies + bp->current_interval;
9972 bp->timer.data = (unsigned long) bp;
9973 bp->timer.function = bnx2x_timer;
9974
9975 return rc;
a2fbb9ea
ET
9976}
9977
9978/*
9979 * ethtool service functions
9980 */
9981
9982/* All ethtool functions called with rtnl_lock */
9983
9984static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9985{
9986 struct bnx2x *bp = netdev_priv(dev);
9987
34f80b04
EG
9988 cmd->supported = bp->port.supported;
9989 cmd->advertising = bp->port.advertising;
a2fbb9ea 9990
f34d28ea
EG
9991 if ((bp->state == BNX2X_STATE_OPEN) &&
9992 !(bp->flags & MF_FUNC_DIS) &&
9993 (bp->link_vars.link_up)) {
c18487ee
YR
9994 cmd->speed = bp->link_vars.line_speed;
9995 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9996 if (IS_E1HMF(bp)) {
9997 u16 vn_max_rate;
34f80b04 9998
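 /* In MF mode the per-VN max bandwidth caps the reported speed;
 * MAX_BW seems to be kept in units of 100 Mbps, hence the
 * scaling (assumption). */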
b015e3d1
EG
9999 vn_max_rate =
10000 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 10001 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
10002 if (vn_max_rate < cmd->speed)
10003 cmd->speed = vn_max_rate;
10004 }
10005 } else {
10006 cmd->speed = -1;
10007 cmd->duplex = -1;
34f80b04 10008 }
a2fbb9ea 10009
c18487ee
YR
10010 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10011 u32 ext_phy_type =
10012 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
10013
10014 switch (ext_phy_type) {
10015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 10016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 10017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
10018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 10021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
10022 cmd->port = PORT_FIBRE;
10023 break;
10024
10025 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 10026 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
10027 cmd->port = PORT_TP;
10028 break;
10029
c18487ee
YR
10030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10031 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10032 bp->link_params.ext_phy_config);
10033 break;
10034
f1410647
ET
10035 default:
10036 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
10037 bp->link_params.ext_phy_config);
10038 break;
f1410647
ET
10039 }
10040 } else
a2fbb9ea 10041 cmd->port = PORT_TP;
a2fbb9ea 10042
01cd4528 10043 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
10044 cmd->transceiver = XCVR_INTERNAL;
10045
c18487ee 10046 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 10047 cmd->autoneg = AUTONEG_ENABLE;
f1410647 10048 else
a2fbb9ea 10049 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
10050
10051 cmd->maxtxpkt = 0;
10052 cmd->maxrxpkt = 0;
10053
10054 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10055 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10056 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10057 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10058 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10059 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10060 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10061
10062 return 0;
10063}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
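
/*
 * Illustrative only (not part of the driver): a forced-speed request
 * from userspace that exercises the SPEED_10000 branch above would be
 *
 *	ethtool -s eth0 speed 10000 duplex full autoneg off
 *
 * ("eth0" is a placeholder interface name.)
 */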

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
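
/*
 * Worked example of the math above: a wreg entry that describes one
 * written register followed by read_regs_count = 3 readable registers
 * contributes size * (1 + 3) dwords. The dword total is converted to
 * bytes (* 4) and the dump header is added on top, so the reported
 * length matches what bnx2x_get_regs() emits below.
 */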

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
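
/*
 * Illustrative only: the fields filled in above are what userspace
 * sees from "ethtool -i eth0" (driver, version, firmware-version,
 * bus-info). The fw_version string combines the FW file version, the
 * bootcode ("bc x.y.z") and, when this function is the PMF, the
 * external PHY FW version.
 */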

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
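
/*
 * Illustrative only: since WAKE_MAGIC is the one supported option,
 * "ethtool -s eth0 wol g" enables magic-packet wake and
 * "ethtool -s eth0 wol d" disables it; any other wol flag is
 * rejected with -EINVAL above.
 */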

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
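
/*
 * Note on the handshake above: each port sets its request bit
 * (ARB_REQ_SET1 << port) and then polls for the matching grant bit
 * (ARB_ARB1 << port); release is symmetric via ARB_REQ_CLR1. The poll
 * budget is NVRAM_TIMEOUT_COUNT * 10 iterations of 5 us each, scaled
 * by 100 on emulation/FPGA where everything runs slower.
 */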

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
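
/*
 * Worked example of the endianness handling above: if the register
 * read returns 0x11223344, cpu_to_be32() lays it out in memory as the
 * byte sequence 11 22 33 44 regardless of host endianness, which is
 * the byte order ethtool presents to the user.
 */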

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
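
/*
 * Worked example for BYTE_OFFSET(): a single-byte write to offset
 * 0x103 aligns down to dword 0x100 and targets byte lane 3, so the
 * shift is 8 * (0x103 & 0x03) = 24; the read-modify-write above
 * clears that lane and ORs the new byte into it.
 */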

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
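
/*
 * Note on the flag juggling above: the NVRAM interface wants each
 * burst opened with MCPR_NVM_COMMAND_FIRST and closed with
 * MCPR_NVM_COMMAND_LAST, and a burst may not cross a flash page.
 * Hence LAST is set on the final dword of the buffer or of a page
 * (the next offset would hit a NVRAM_PAGE_SIZE boundary), and FIRST
 * is set again on the dword that opens the next page.
 */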

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
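
/*
 * For reference: the magic numbers above are ASCII tags sent by the
 * PHY FW upgrade tool - 0x50485950 is "PHYP" and 0x50485952 is
 * "PHYR"; a magic outside the 0x504859xx PHY range falls through to
 * a plain NVRAM write of the user's buffer.
 */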

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
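
/*
 * Illustrative only: "ethtool -C eth0 rx-usecs 25 tx-usecs 50" lands
 * here as coal->rx_coalesce_usecs/tx_coalesce_usecs; out-of-range
 * values are silently clamped to BNX2X_MAX_COALESCE_TOUT rather than
 * rejected.
 */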

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
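
/*
 * Illustrative only: "ethtool -G eth0 rx 4080 tx 4080" reaches this
 * handler. A tx ring of MAX_SKB_FRAGS + 4 descriptors or fewer is
 * refused because a single worst-case skb could not be posted, and
 * changing the sizes on a running interface does a full unload/reload
 * of the NIC.
 */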

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
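
/*
 * Illustrative only: "ethtool -A eth0 autoneg on rx on tx on" maps to
 * epause above; rx/tx pause are accumulated into req_flow_ctrl as the
 * BNX2X_FLOW_CTRL_RX/TX bits, and pause autonegotiation is honoured
 * only when the link speed itself is autonegotiated.
 */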
10976
df0f2343
VZ
10977static int bnx2x_set_flags(struct net_device *dev, u32 data)
10978{
10979 struct bnx2x *bp = netdev_priv(dev);
10980 int changed = 0;
10981 int rc = 0;
10982
72fd0718
VZ
10983 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10984 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10985 return -EAGAIN;
10986 }
10987
df0f2343
VZ
10988 /* TPA requires Rx CSUM offloading */
10989 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
d43a7e67
VZ
10990 if (!disable_tpa) {
10991 if (!(dev->features & NETIF_F_LRO)) {
10992 dev->features |= NETIF_F_LRO;
10993 bp->flags |= TPA_ENABLE_FLAG;
10994 changed = 1;
10995 }
10996 } else
10997 rc = -EINVAL;
df0f2343
VZ
10998 } else if (dev->features & NETIF_F_LRO) {
10999 dev->features &= ~NETIF_F_LRO;
11000 bp->flags &= ~TPA_ENABLE_FLAG;
11001 changed = 1;
11002 }
11003
11004 if (changed && netif_running(dev)) {
11005 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11006 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11007 }
11008
11009 return rc;
11010}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
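
/*
 * The table-driven test above is a classic walking-0/walking-1 check:
 * every register is written with 0x00000000 and then 0xffffffff, read
 * back under its documented mask, and restored; offset1 is the
 * per-port stride added for port 1.
 */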

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
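
/*
 * Shape of the loopback frame built above: destination MAC set to our
 * own address, source MAC zeroed, the rest of the Ethernet header
 * filled with 0x77, and a payload of (i & 0xff) so the RX side can
 * verify every byte after the header survived the round trip.
 */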

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (BP_NOMCP(bp))
		return rc;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	if (BP_NOMCP(bp))
		return 0;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
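
/*
 * Why comparing against a constant works: each nvram_tbl region is
 * stored with its own CRC32 appended, and running CRC32 over
 * data-plus-CRC yields a fixed residual for any intact block
 * (0xdebb20e3 for the reflected variant ether_crc_le() computes), so
 * no per-region expected value is needed.
 */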

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
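
/*
 * Illustrative only: "ethtool -t eth0 offline" exercises the
 * register/memory/loopback tests above in LOAD_DIAG mode and then the
 * online nvram/interrupt/link tests; with "ethtool -t eth0 online"
 * only the latter run. buf[] holds one result slot per entry of
 * bnx2x_tests_str_arr.
 */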

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%d]: tx_bcast_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
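
/*
 * Worked example of the count above (non-MF mode): with the arrays as
 * defined here (13 per-queue stats, 43 global stats), a multi-queue
 * device with 8 queues reports 8 * 13 + 43 = 147 ETH_SS_STATS
 * strings, while ETH_SS_TEST always reports BNX2X_NUM_TESTS.
 */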

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
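
/*
 * Note on the 8-byte case above: wide counters are kept as two
 * consecutive u32s, most significant word first (hence the *_hi
 * naming in the tables), so HILO_U64(*offset, *(offset + 1)) is
 * effectively ((u64)hi << 32) + lo.
 */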

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}
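
/*
 * Editor's note on the loop above (derived from the code, illustration
 * only): "data" behaves as the blink duration in seconds, each second
 * consisting of two 500 ms half-periods (LED on, then off), hence the
 * data * 2 bound.  E.g. "ethtool -p eth0 3" drives six half-periods:
 * on/off/on/off/on/off, after which the LED is restored to reflect the
 * current link speed.
 */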

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above, don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
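
/*
 * Editor's note (illustration only): in the D3hot arm above, the low two
 * bits of PMCSR encode the PCI power state (0 = D0, 1 = D1, 2 = D2,
 * 3 = D3hot), so "pmcsr |= 3" after clearing PCI_PM_CTRL_STATE_MASK
 * selects D3hot, and PCI_PM_CTRL_PME_ENABLE is OR'ed in only when
 * Wake-on-LAN is armed.
 */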

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
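
/*
 * Editor's note (illustration only): the increment above is assumed to skip
 * the last slot of an RCQ page, which holds a "next page" pointer rather
 * than a completion.  With MAX_RCQ_DESC_CNT acting as the per-page index
 * mask, a consumer value whose low bits are all ones is bumped past that
 * slot so it can be compared against the driver's rx_comp_cons directly.
 */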

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we
			 * need to ensure that status block indices have been
			 * actually read (bnx2x_update_fpsb_idx) prior to this
			 * check (bnx2x_has_rx_work) so that we won't write the
			 * "newer" value of the status block to IGU (if there
			 * was a DMA right after bnx2x_has_rx_work and if there
			 * is no rmb, the memory reading (bnx2x_update_fpsb_idx)
			 * may be postponed to right before bnx2x_ack_sb). In
			 * this case there will never be another interrupt
			 * until there is another update of the status block,
			 * while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}

/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM).
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
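
/*
 * Editor's worked example (not part of the driver): for a TSO skb whose
 * linear part is 1514 bytes with hlen = 54 (Ethernet + IP + TCP headers),
 * the header BD keeps the original DMA address with nbytes = 54, and the
 * new data BD reuses the same mapping at offset +54 with
 * nbytes = 1514 - 54 = 1460.  The caller bumps nbd on the header BD to
 * account for the extra descriptor.
 */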

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
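
/*
 * Editor's note (illustration only): "fix" is the signed byte distance
 * between where the stack started its running checksum and where the
 * transport header actually begins.  A positive fix means the sum covers
 * fix extra bytes before t_header, so their partial checksum is
 * subtracted; a negative fix means bytes were missed, so they are added.
 * The result is then folded, inverted, and byte-swapped into the order
 * the hardware parser expects.
 */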

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* Check if the packet requires linearization (i.e. it is too fragmented).
   No need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in the non-LSO case a too-fragmented packet
			   should always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
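
/*
 * Editor's worked example (not part of the driver): assuming
 * MAX_FETCH_BD = 13 purely for arithmetic, the firmware can fetch at most
 * wnd_size = 10 data BDs per MSS-sized chunk.  For an LSO skb with 12
 * fragments, num_wnds = 12 - 10 = 2, so three sliding windows of 10
 * consecutive BDs are summed; if any window carries fewer than lso_mss
 * bytes, the skb is linearized before it is sent.
 */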

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
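
/*
 * Editor's worked example (not part of the driver): nbd starts at
 * nr_frags + 2 (start BD + parse BD + one BD per fragment; the linear part
 * rides in the start BD).  A 3-fragment TSO skb whose headers and payload
 * share the linear buffer gets one extra BD from bnx2x_tx_split(), so the
 * doorbell is rung with nbd = 3 + 2 + 1 = 6, plus one more when the chain
 * wraps onto the "next page" BD (the TX_BD_POFF() check above).
 */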

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, it may mean that a recovery
			 * is pending. We don't check the attention state here
			 * because it may have already been cleared by a
			 * "common" reset but we shall proceed with
			 * "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
					HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
			       " completed yet. Try again later. If you still"
			       " see this message after a few retries then a"
			       " power cycle is required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
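
/*
 * Editor's worked example (not part of the driver): in the E1H branch the
 * filter bit index is the top byte of the little-endian CRC32C of the MAC
 * address.  If crc32c_le() yields a value whose top byte is 0x9a (154),
 * then regidx = 154 >> 5 = 4 and bit = 154 & 0x1f = 26, so bit 26 of the
 * fifth MC_HASH register is set for that multicast address.
 */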

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
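
/*
 * Editor's worked example (not part of the driver): each 8-byte ops entry
 * packs the opcode into the top byte of the first big-endian word.  A
 * first word of 0x1f001234 therefore decodes to op = 0x1f and
 * offset = 0x001234, while the second word is byte-swapped to CPU order
 * and stored verbatim as raw_data.
 */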

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
13486
493adb1f
WX
13487/**
13488 * bnx2x_io_error_detected - called when PCI error is detected
13489 * @pdev: Pointer to PCI device
13490 * @state: The current pci connection state
13491 *
13492 * This function is called after a PCI bus error affecting
13493 * this device has been detected.
13494 */
13495static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13496 pci_channel_state_t state)
13497{
13498 struct net_device *dev = pci_get_drvdata(pdev);
13499 struct bnx2x *bp = netdev_priv(dev);
13500
13501 rtnl_lock();
13502
13503 netif_device_detach(dev);
13504
07ce50e4
DN
13505 if (state == pci_channel_io_perm_failure) {
13506 rtnl_unlock();
13507 return PCI_ERS_RESULT_DISCONNECT;
13508 }
13509
493adb1f 13510 if (netif_running(dev))
f8ef6e44 13511 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
13512
13513 pci_disable_device(pdev);
13514
13515 rtnl_unlock();
13516
13517 /* Request a slot reset */
13518 return PCI_ERS_RESULT_NEED_RESET;
13519}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

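/* For reference: on an uncorrectable PCI error, the PCI error-recovery core
 * invokes the handlers above roughly in the order sketched below. This is an
 * illustrative outline of the core's calling sequence, not driver code;
 * 'pdev' and 'state' stand in for the values the core passes.
 */
#if 0
	pci_ers_result_t ers;

	ers = bnx2x_io_error_detected(pdev, state);	/* detach, unload NIC */
	if (ers == PCI_ERS_RESULT_NEED_RESET) {
		/* the core resets the slot/link before calling back */
		ers = bnx2x_io_slot_reset(pdev);	/* re-enable device */
	}
	if (ers == PCI_ERS_RESULT_RECOVERED)
		bnx2x_io_resume(pdev);			/* reload and attach */
#endif
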
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

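/* The functions below bridge CNIC's 16-byte kernel work queue entries
 * (KWQEs) into the device slow-path queue (SPQ): bnx2x_cnic_sp_queue()
 * produces entries into the cnic_kwq ring, and bnx2x_cnic_sp_post() drains
 * them into the SPQ as earlier entries complete, keeping no more than
 * max_kwqe_pending in flight at once.
 */
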
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the ring consumer, wrapping at the end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

/* Returns the number of KWQEs actually accepted into the ring */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the ring producer, wrapping at the end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

/* Process context: serializes against cnic (un)registration with a mutex */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

/* Bottom-half-safe variant: cannot sleep, so it relies on RCU, not the mutex */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
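
/* Illustration only (hypothetical caller, not taken from the cnic driver):
 * a registered client could acknowledge 'n' slow-path completions through
 * the drv_ctl hook like this; 'cp' is the cnic_eth_dev returned by
 * bnx2x_cnic_probe() below, 'netdev' the corresponding net_device, and 'n'
 * a completion count the client has accumulated.
 */
#if 0
	struct drv_ctl_info info = { .cmd = DRV_CTL_COMPLETION_CMD, };

	info.data.comp.comp_count = n;
	cp->drv_ctl(netdev, &info);	/* ends up in bnx2x_cnic_sp_post(bp, n) */
#endif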

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		/* CNIC gets its own MSI-X vector (entry 1 of the table) */
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
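
/* Illustrative sketch (not part of the driver): how a CNIC client might
 * attach to this device. 'my_cnic_ctl', 'my_ops', 'my_data' and 'my_attach'
 * are hypothetical; only the callback and registration signatures used here
 * are taken from the driver code above.
 */
#if 0
static int my_cnic_ctl(void *data, struct cnic_ctl_info *ctl)
{
	/* handle notifications (e.g. completions) from bnx2x */
	return 0;
}

static struct cnic_ops my_ops = {
	.cnic_ctl = my_cnic_ctl,
};

static void my_attach(struct net_device *netdev)
{
	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
	void *my_data = NULL;

	/* registering hands bnx2x our callbacks; KWQEs are then submitted
	 * through cp->drv_submit_kwqes_16 and control ops via cp->drv_ctl
	 */
	if (cp && cp->drv_register_cnic(netdev, &my_ops, my_data) == 0)
		pr_info("attached to bnx2x CNIC device\n");
}
#endif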

#endif /* BCM_CNIC */
