/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.53-1"
#define DRV_MODULE_RELDATE	"2010/18/04"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
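
/*
 * Note: the two functions above tunnel register accesses through the
 * PCI config space pair PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA, which acts
 * as an address/data window into the GRC register space.  Restoring
 * PCICFG_GRC_ADDRESS to PCICFG_VENDOR_ID_OFFSET afterwards presumably
 * parks the window on a harmless read-only location.
 */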

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
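
/*
 * Note: completion of a DMAE transaction is detected purely by polling:
 * the DMAE engine writes DMAE_COMP_VAL to the slowpath wb_comp word
 * (dmae.comp_addr_*) when it is done, and the loop above spins on that
 * word for up to ~200 iterations before declaring a timeout.  On a
 * timeout the function only logs an error and falls through; callers
 * apparently have no way to tell that the copy may be incomplete.
 */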

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
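
/*
 * Worked example (hypothetical numbers): with DMAE_LEN32_WR_MAX(bp) of
 * 0x400 dwords and len of 0x900 dwords, the loop above issues two
 * 0x400-dword writes, advancing the byte offset by 0x1000 each time
 * (len counts 32-bit words, hence the "* 4"), and the final call
 * outside the loop writes the remaining 0x100 dwords.
 */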

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
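
/*
 * Note: each of the four STORM processors (X/T/C/U) keeps its own
 * assert list in BAR-mapped internal memory.  The scans above read
 * 16-byte entries (row0..row3) and stop at the first entry whose
 * opcode word still holds COMMON_ASM_INVALID_ASSERT_OPCODE, so the
 * value returned is the total number of valid asserts found.
 */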

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
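
/*
 * Note: the MCP trace lives in a circular scratchpad buffer just below
 * shmem_base, and "mark" (read from shmem_base - 0x0800 + 4) points at
 * the current write position.  Printing first from mark up to
 * shmem_base and then from the buffer start up to mark emits the log
 * in chronological order despite the wraparound.
 */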

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
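
/*
 * Note: bnx2x_int_enable() programs the HC_CONFIG register for whichever
 * interrupt mode is in use: MSI-X (per-vector interrupts, INTx line off),
 * MSI (single ISR) or legacy INTx.  In the INTx case the register is
 * written twice - once with MSI/MSI-X still set and once without - which
 * appears to be a deliberate ordering requirement of the HC rather than
 * an accident.  On E1H the leading/trailing edge registers are also set
 * up so attention bits for this VN (and, for the PMF, NIG/GPIO3) fire.
 */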

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		/* this function returns bool, so returning -EINVAL here
		 * would read as "lock taken"; report failure instead */
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
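
/*
 * Note: acquiring the HW lock above is a single register handshake:
 * writing resource_bit to hw_lock_control_reg + 4 requests the lock and
 * reading hw_lock_control_reg back shows which bits were actually
 * granted.  Each of the eight PCI functions has its own control
 * register, selected via BP_FUNC().
 */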

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
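
/*
 * Note: the IGU ack is a single 32-bit write that packs the status
 * block id, storm id, new consumer index, update flag and interrupt
 * mode (enable/disable) into one igu_ack_register image, so the index
 * update and the interrupt enable/disable take effect atomically from
 * the hardware's point of view.
 */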

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
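
/*
 * Note on the BD accounting above: nbd starts as the firmware's BD
 * count minus the start BD (already unmapped), and the parse BD plus
 * the optional TSO split-header BD are then skipped without unmapping,
 * since they carry no DMA mapping of their own; everything that
 * remains is fragment BDs, unmapped one by one in the loop.
 */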

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
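
/*
 * Worked example (hypothetical numbers): with tx_ring_size = 4072,
 * prod = 110 and cons = 100, used = 10 + NUM_TX_RINGS.  The
 * NUM_TX_RINGS term reserves the "next page" BDs, which take part in
 * the prod/cons arithmetic but can never carry packet data, so the
 * value returned is a safe underestimate of the truly free BDs.
 */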

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
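
/*
 * Note: the smp_mb() in bnx2x_tx_int() presumably pairs with a matching
 * barrier in bnx2x_start_xmit(); together they close the classic
 * stop/wake race in which each side sees a stale view of tx_bd_cons and
 * the queue state, leaving the queue stopped forever.  The re-check
 * under __netif_tx_lock() then guards against waking a queue that
 * start_xmit() is about to fill again.
 */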

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
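
/*
 * Note: ramrods (slowpath commands) complete through the same Rx
 * completion queue as regular traffic, so bnx2x_sp_event() keys its
 * switch on (command | state): an identical completion code means
 * different things depending on whether the driver is opening, closing
 * or running.  spq_left is credited back here because every completed
 * ramrod frees a slot in the slowpath queue.
 */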

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
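
/*
 * Note: the SGE mask is a bitmap with one bit per SGE entry, stored in
 * u64 elements.  Bits are cleared as the firmware consumes pages, and
 * an element is folded into the new producer value only once it is
 * completely zero, which is why bnx2x_update_sge_prod() advances
 * rx_sge_prod in RX_SGE_MASK_ELEM_SZ steps rather than one entry at a
 * time.
 */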

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
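
/*
 * Note: gso_size is set above so that a TPA-aggregated skb can still be
 * software-segmented if it is later forwarded out of a device without
 * TSO; clamping it to SGE_PAGE_SIZE and to at least len_on_bd presumably
 * keeps the synthetic MSS within what a single SGE page and the linear
 * part can describe.
 */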

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511}
1512
1513static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1516 u16 rx_sge_prod)
1517{
8d9c5f34 1518 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1519 int i;
1520
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1525
58f4c4cf
EG
1526 /*
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1529 * is updated.
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW will
1532 * assumes BDs must have buffers.
1533 */
1534 wmb();
1535
8d9c5f34
EG
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1539 ((u32 *)&rx_prods)[i]);
1540
58f4c4cf
EG
1541 mmiowb(); /* keep prod updates ordered */
1542
7a9b2557 1543 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1546}
1547
6f3c72a2
VZ
1548/* Set Toeplitz hash value in the skb using the value from the
1549 * CQE (calculated by HW).
1550 */
1551static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
1552 struct sk_buff *skb)
1553{
1554 /* Set Toeplitz hash from CQE */
1555 if ((bp->dev->features & NETIF_F_RXHASH) &&
1556 (cqe->fast_path_cqe.status_flags &
1557 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1558 skb->rxhash =
1559 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1560}
1561
a2fbb9ea
ET
1562static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1563{
1564 struct bnx2x *bp = fp->bp;
34f80b04 1565 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1566 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1567 int rx_pkt = 0;
1568
1569#ifdef BNX2X_STOP_ON_ERROR
1570 if (unlikely(bp->panic))
1571 return 0;
1572#endif
1573
34f80b04
EG
1574 /* CQ "next element" is of the size of the regular element,
1575 that's why it's ok here */
a2fbb9ea
ET
1576 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1577 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1578 hw_comp_cons++;
1579
1580 bd_cons = fp->rx_bd_cons;
1581 bd_prod = fp->rx_bd_prod;
34f80b04 1582 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1583 sw_comp_cons = fp->rx_comp_cons;
1584 sw_comp_prod = fp->rx_comp_prod;
1585
1586 /* Memory barrier necessary as speculative reads of the rx
1587 * buffer can be ahead of the index in the status block
1588 */
1589 rmb();
1590
1591 DP(NETIF_MSG_RX_STATUS,
1592 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1593 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1594
1595 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1596 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1597 struct sk_buff *skb;
1598 union eth_rx_cqe *cqe;
6f3c72a2 1599 u8 cqe_fp_flags;
34f80b04 1600 u16 len, pad;
a2fbb9ea
ET
1601
1602 comp_ring_cons = RCQ_BD(sw_comp_cons);
1603 bd_prod = RX_BD(bd_prod);
1604 bd_cons = RX_BD(bd_cons);
1605
619e7a66
EG
1606 /* Prefetch the page containing the BD descriptor
1607 at producer's index. It will be needed when new skb is
1608 allocated */
1609 prefetch((void *)(PAGE_ALIGN((unsigned long)
1610 (&fp->rx_desc_ring[bd_prod])) -
1611 PAGE_SIZE + 1));
1612
a2fbb9ea 1613 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1614 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1615
a2fbb9ea 1616 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1617 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1618 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1619 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1620 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1621 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1622
1623 /* is this a slowpath msg? */
34f80b04 1624 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1625 bnx2x_sp_event(fp, cqe);
1626 goto next_cqe;
1627
1628 /* this is an rx packet */
1629 } else {
1630 rx_buf = &fp->rx_buf_ring[bd_cons];
1631 skb = rx_buf->skb;
54b9ddaa 1632 prefetch(skb);
a2fbb9ea
ET
1633 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1634 pad = cqe->fast_path_cqe.placement_offset;
1635
7a9b2557
VZ
1636 /* If CQE is marked both TPA_START and TPA_END
1637 it is a non-TPA CQE */
1638 if ((!fp->disable_tpa) &&
1639 (TPA_TYPE(cqe_fp_flags) !=
1640 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1641 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1642
1643 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1644 DP(NETIF_MSG_RX_STATUS,
1645 "calling tpa_start on queue %d\n",
1646 queue);
1647
1648 bnx2x_tpa_start(fp, queue, skb,
1649 bd_cons, bd_prod);
6f3c72a2
VZ
1650
1651 /* Set Toeplitz hash for an LRO skb */
1652 bnx2x_set_skb_rxhash(bp, cqe, skb);
1653
7a9b2557
VZ
1654 goto next_rx;
1655 }
1656
1657 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1658 DP(NETIF_MSG_RX_STATUS,
1659 "calling tpa_stop on queue %d\n",
1660 queue);
1661
1662 if (!BNX2X_RX_SUM_FIX(cqe))
1663 BNX2X_ERR("STOP on none TCP "
1664 "data\n");
1665
1666 /* This is a size of the linear data
1667 on this skb */
1668 len = le16_to_cpu(cqe->fast_path_cqe.
1669 len_on_bd);
1670 bnx2x_tpa_stop(bp, fp, queue, pad,
1671 len, cqe, comp_ring_cons);
1672#ifdef BNX2X_STOP_ON_ERROR
1673 if (bp->panic)
17cb4006 1674 return 0;
7a9b2557
VZ
1675#endif
1676
1677 bnx2x_update_sge_prod(fp,
1678 &cqe->fast_path_cqe);
1679 goto next_cqe;
1680 }
1681 }
1682
1a983142
FT
1683 dma_sync_single_for_device(&bp->pdev->dev,
1684 dma_unmap_addr(rx_buf, mapping),
1685 pad + RX_COPY_THRESH,
1686 DMA_FROM_DEVICE);
a2fbb9ea
ET
1687 prefetch(((char *)(skb)) + 128);
1688
1689 /* is this an error packet? */
34f80b04 1690 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1691 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1692 "ERROR flags %x rx packet %u\n",
1693 cqe_fp_flags, sw_comp_cons);
de832a55 1694 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1695 goto reuse_rx;
1696 }
1697
1698 /* Since we don't have a jumbo ring
1699 * copy small packets if mtu > 1500
1700 */
1701 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1702 (len <= RX_COPY_THRESH)) {
1703 struct sk_buff *new_skb;
1704
1705 new_skb = netdev_alloc_skb(bp->dev,
1706 len + pad);
1707 if (new_skb == NULL) {
1708 DP(NETIF_MSG_RX_ERR,
34f80b04 1709 "ERROR packet dropped "
a2fbb9ea 1710 "because of alloc failure\n");
de832a55 1711 fp->eth_q_stats.rx_skb_alloc_failed++;
1712 goto reuse_rx;
1713 }
1714
1715 /* aligned copy */
1716 skb_copy_from_linear_data_offset(skb, pad,
1717 new_skb->data + pad, len);
1718 skb_reserve(new_skb, pad);
1719 skb_put(new_skb, len);
1720
1721 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1722
1723 skb = new_skb;
1724
1725 } else
1726 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1727 dma_unmap_single(&bp->pdev->dev,
1728 dma_unmap_addr(rx_buf, mapping),
437cf2f1 1729 bp->rx_buf_size,
1a983142 1730 DMA_FROM_DEVICE);
1731 skb_reserve(skb, pad);
1732 skb_put(skb, len);
1733
1734 } else {
1735 DP(NETIF_MSG_RX_ERR,
34f80b04 1736 "ERROR packet dropped because "
a2fbb9ea 1737 "of alloc failure\n");
de832a55 1738 fp->eth_q_stats.rx_skb_alloc_failed++;
1739reuse_rx:
1740 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1741 goto next_rx;
1742 }
1743
1744 skb->protocol = eth_type_trans(skb, bp->dev);
1745
 1746			/* Set Toeplitz hash for a non-LRO skb */
1747 bnx2x_set_skb_rxhash(bp, cqe, skb);
c68ed255 1748
a2fbb9ea 1749 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1750 if (bp->rx_csum) {
1751 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1752 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1753 else
de832a55 1754 fp->eth_q_stats.hw_csum_err++;
66e855f3 1755 }
1756 }
1757
748e5439 1758 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1759
a2fbb9ea 1760#ifdef BCM_VLAN
0c6671b0 1761 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1762 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1763 PARSING_FLAGS_VLAN))
1764 vlan_gro_receive(&fp->napi, bp->vlgrp,
1765 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1766 else
1767#endif
4fd89b7a 1768 napi_gro_receive(&fp->napi, skb);
a2fbb9ea 1769
1770
1771next_rx:
1772 rx_buf->skb = NULL;
1773
1774 bd_cons = NEXT_RX_IDX(bd_cons);
1775 bd_prod = NEXT_RX_IDX(bd_prod);
1776 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1777 rx_pkt++;
1778next_cqe:
1779 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1780 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1781
34f80b04 1782 if (rx_pkt == budget)
1783 break;
1784 } /* while */
1785
1786 fp->rx_bd_cons = bd_cons;
34f80b04 1787 fp->rx_bd_prod = bd_prod_fw;
1788 fp->rx_comp_cons = sw_comp_cons;
1789 fp->rx_comp_prod = sw_comp_prod;
1790
1791 /* Update producers */
1792 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1793 fp->rx_sge_prod);
1794
1795 fp->rx_pkt += rx_pkt;
1796 fp->rx_calls++;
1797
1798 return rx_pkt;
1799}
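
/*
 * Editor's note: a minimal, hypothetical sketch of the NAPI poll
 * handler that would consume the rx routine above (the driver's real
 * poll function also services Tx and re-checks the status block).
 * 'rx_int' stands in for the routine above, and 'fp_u_idx' is assumed
 * to be the last-seen USTORM status-block index.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp =
		container_of(napi, struct bnx2x_fastpath, napi);
	int work_done = rx_int(fp, budget);

	if (work_done < budget) {
		/* ring drained: leave polling mode and unmask the IRQ */
		napi_complete(napi);
		bnx2x_ack_sb(fp->bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}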
1800
1801static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1802{
1803 struct bnx2x_fastpath *fp = fp_cookie;
1804 struct bnx2x *bp = fp->bp;
a2fbb9ea 1805
1806 /* Return here if interrupt is disabled */
1807 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1808 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1809 return IRQ_HANDLED;
1810 }
1811
34f80b04 1812 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1813 fp->index, fp->sb_id);
0626b899 1814 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1815
1816#ifdef BNX2X_STOP_ON_ERROR
1817 if (unlikely(bp->panic))
1818 return IRQ_HANDLED;
1819#endif
ca00392c 1820
1821 /* Handle Rx and Tx according to MSI-X vector */
1822 prefetch(fp->rx_cons_sb);
1823 prefetch(fp->tx_cons_sb);
1824 prefetch(&fp->status_blk->u_status_block.status_block_index);
1825 prefetch(&fp->status_blk->c_status_block.status_block_index);
1826 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1827
1828 return IRQ_HANDLED;
1829}
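
/*
 * Editor's sketch: binding the handler above to its MSI-X vectors.
 * A hedged approximation of the request loop -- 'msix_table' is the
 * driver's array of struct msix_entry, and vector 0 is assumed to
 * carry the slowpath/default status block interrupt.
 */
static int example_req_msix(struct bnx2x *bp)
{
	int i, rc;

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		rc = request_irq(bp->msix_table[i + 1].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc)
			return rc; /* caller frees already-requested vectors */
	}
	return 0;
}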
1830
1831static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1832{
555f6c78 1833 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1834 u16 status = bnx2x_ack_int(bp);
34f80b04 1835 u16 mask;
ca00392c 1836 int i;
a2fbb9ea 1837
34f80b04 1838 /* Return here if interrupt is shared and it's not for us */
1839 if (unlikely(status == 0)) {
1840 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1841 return IRQ_NONE;
1842 }
f5372251 1843 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1844
34f80b04 1845 /* Return here if interrupt is disabled */
1846 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1847 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1848 return IRQ_HANDLED;
1849 }
1850
1851#ifdef BNX2X_STOP_ON_ERROR
1852 if (unlikely(bp->panic))
1853 return IRQ_HANDLED;
1854#endif
1855
1856 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1857 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1858
1859 mask = 0x2 << fp->sb_id;
1860 if (status & mask) {
1861 /* Handle Rx and Tx according to SB id */
1862 prefetch(fp->rx_cons_sb);
1863 prefetch(&fp->status_blk->u_status_block.
1864 status_block_index);
1865 prefetch(fp->tx_cons_sb);
1866 prefetch(&fp->status_blk->c_status_block.
1867 status_block_index);
1868 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1869 status &= ~mask;
1870 }
1871 }
1872
1873#ifdef BCM_CNIC
1874 mask = 0x2 << CNIC_SB_ID(bp);
1875 if (status & (mask | 0x1)) {
1876 struct cnic_ops *c_ops = NULL;
1877
1878 rcu_read_lock();
1879 c_ops = rcu_dereference(bp->cnic_ops);
1880 if (c_ops)
1881 c_ops->cnic_handler(bp->cnic_data, NULL);
1882 rcu_read_unlock();
1883
1884 status &= ~mask;
1885 }
1886#endif
a2fbb9ea 1887
34f80b04 1888 if (unlikely(status & 0x1)) {
1cf167f2 1889 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1890
1891 status &= ~0x1;
1892 if (!status)
1893 return IRQ_HANDLED;
1894 }
1895
1896 if (unlikely(status))
1897 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1898 status);
a2fbb9ea 1899
c18487ee 1900 return IRQ_HANDLED;
1901}
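
/*
 * Editor's sketch: in INTx mode the shared handler above is registered
 * against the single PCI interrupt with the net_device as its cookie,
 * which it recovers via netdev_priv().  Roughly (hedged, not the exact
 * driver request path):
 *
 *	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
 *			 bp->dev->name, bp->dev);
 */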
1902
c18487ee 1903/* end of fast path */
a2fbb9ea 1904
bb2a0f7a 1905static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1906
1907/* Link */
1908
1909/*
1910 * General service functions
1911 */
a2fbb9ea 1912
4a37fb66 1913static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1914{
1915 u32 lock_status;
1916 u32 resource_bit = (1 << resource);
1917 int func = BP_FUNC(bp);
1918 u32 hw_lock_control_reg;
c18487ee 1919 int cnt;
a2fbb9ea 1920
1921 /* Validating that the resource is within range */
1922 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1923 DP(NETIF_MSG_HW,
1924 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1925 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1926 return -EINVAL;
1927 }
a2fbb9ea 1928
4a37fb66
YG
1929 if (func <= 5) {
1930 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1931 } else {
1932 hw_lock_control_reg =
1933 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1934 }
1935
c18487ee 1936 /* Validating that the resource is not already taken */
4a37fb66 1937 lock_status = REG_RD(bp, hw_lock_control_reg);
1938 if (lock_status & resource_bit) {
1939 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1940 lock_status, resource_bit);
1941 return -EEXIST;
1942 }
a2fbb9ea 1943
 1944	/* Try for 5 seconds, every 5 ms */
1945 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1946 /* Try to acquire the lock */
1947 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1948 lock_status = REG_RD(bp, hw_lock_control_reg);
1949 if (lock_status & resource_bit)
1950 return 0;
a2fbb9ea 1951
c18487ee 1952 msleep(5);
a2fbb9ea 1953 }
1954 DP(NETIF_MSG_HW, "Timeout\n");
1955 return -EAGAIN;
1956}
a2fbb9ea 1957
4a37fb66 1958static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1959{
1960 u32 lock_status;
1961 u32 resource_bit = (1 << resource);
1962 int func = BP_FUNC(bp);
1963 u32 hw_lock_control_reg;
a2fbb9ea 1964
1965 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1966
1967 /* Validating that the resource is within range */
1968 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1969 DP(NETIF_MSG_HW,
1970 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1971 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1972 return -EINVAL;
1973 }
1974
1975 if (func <= 5) {
1976 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1977 } else {
1978 hw_lock_control_reg =
1979 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1980 }
1981
c18487ee 1982 /* Validating that the resource is currently taken */
4a37fb66 1983 lock_status = REG_RD(bp, hw_lock_control_reg);
1984 if (!(lock_status & resource_bit)) {
1985 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1986 lock_status, resource_bit);
1987 return -EFAULT;
1988 }
1989
4a37fb66 1990 REG_WR(bp, hw_lock_control_reg, resource_bit);
1991 return 0;
1992}
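
/*
 * Editor's usage sketch: the acquire/release pair brackets any access
 * to a resource shared between functions, e.g. the GPIO block (error
 * handling trimmed for brevity):
 *
 *	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... touch the shared GPIO registers ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */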
1993
1994/* HW Lock for shared dual port PHYs */
4a37fb66 1995static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1996{
34f80b04 1997 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1998
1999 if (bp->port.need_hw_lock)
2000 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 2001}
a2fbb9ea 2002
4a37fb66 2003static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 2004{
2005 if (bp->port.need_hw_lock)
2006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 2007
34f80b04 2008 mutex_unlock(&bp->port.phy_mutex);
c18487ee 2009}
a2fbb9ea 2010
2011int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2012{
2013 /* The GPIO should be swapped if swap register is set and active */
2014 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2015 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2016 int gpio_shift = gpio_num +
2017 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2018 u32 gpio_mask = (1 << gpio_shift);
2019 u32 gpio_reg;
2020 int value;
2021
2022 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2024 return -EINVAL;
2025 }
2026
2027 /* read GPIO value */
2028 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2029
2030 /* get the requested pin value */
2031 if ((gpio_reg & gpio_mask) == gpio_mask)
2032 value = 1;
2033 else
2034 value = 0;
2035
2036 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2037
2038 return value;
2039}
2040
17de50b7 2041int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042{
2043 /* The GPIO should be swapped if swap register is set and active */
2044 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2045 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046 int gpio_shift = gpio_num +
2047 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048 u32 gpio_mask = (1 << gpio_shift);
2049 u32 gpio_reg;
a2fbb9ea 2050
2051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2053 return -EINVAL;
2054 }
a2fbb9ea 2055
4a37fb66 2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057 /* read GPIO and mask except the float bits */
2058 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2059
2060 switch (mode) {
2061 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2062 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2063 gpio_num, gpio_shift);
2064 /* clear FLOAT and set CLR */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2067 break;
a2fbb9ea 2068
c18487ee
YR
2069 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2070 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2071 gpio_num, gpio_shift);
2072 /* clear FLOAT and set SET */
2073 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2075 break;
a2fbb9ea 2076
17de50b7 2077 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2078 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2079 gpio_num, gpio_shift);
2080 /* set FLOAT */
2081 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2082 break;
a2fbb9ea 2083
2084 default:
2085 break;
2086 }
2087
c18487ee 2088 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2089 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2090
c18487ee 2091 return 0;
2092}
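
/*
 * Editor's sketch: callers drive a pin by picking one of the
 * MISC_REGISTERS_GPIO_* modes -- e.g. forcing GPIO 2 low on the
 * current port, as the fan-failure handling later in this file does:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 */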
2093
2094int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2095{
2096 /* The GPIO should be swapped if swap register is set and active */
2097 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2098 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2099 int gpio_shift = gpio_num +
2100 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2101 u32 gpio_mask = (1 << gpio_shift);
2102 u32 gpio_reg;
2103
2104 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2105 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2106 return -EINVAL;
2107 }
2108
2109 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2110 /* read GPIO int */
2111 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2112
2113 switch (mode) {
2114 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2115 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2116 "output low\n", gpio_num, gpio_shift);
2117 /* clear SET and set CLR */
2118 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2119 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2120 break;
2121
2122 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2123 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2124 "output high\n", gpio_num, gpio_shift);
2125 /* clear CLR and set SET */
2126 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2127 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2128 break;
2129
2130 default:
2131 break;
2132 }
2133
2134 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2135 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2136
2137 return 0;
2138}
2139
c18487ee 2140static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2141{
2142 u32 spio_mask = (1 << spio_num);
2143 u32 spio_reg;
a2fbb9ea 2144
2145 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2146 (spio_num > MISC_REGISTERS_SPIO_7)) {
2147 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2148 return -EINVAL;
2149 }
2150
4a37fb66 2151 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2152 /* read SPIO and mask except the float bits */
2153 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2154
c18487ee 2155 switch (mode) {
6378c025 2156 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2157 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2158 /* clear FLOAT and set CLR */
2159 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2161 break;
a2fbb9ea 2162
6378c025 2163 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2164 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2165 /* clear FLOAT and set SET */
2166 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2167 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2168 break;
a2fbb9ea 2169
2170 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2171 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2172 /* set FLOAT */
2173 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2174 break;
a2fbb9ea 2175
2176 default:
2177 break;
2178 }
2179
c18487ee 2180 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2181 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2182
2183 return 0;
2184}
2185
c18487ee 2186static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2187{
2188 switch (bp->link_vars.ieee_fc &
2189 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2190 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2191 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192 ADVERTISED_Pause);
2193 break;
356e2385 2194
c18487ee 2195 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2196 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2197 ADVERTISED_Pause);
2198 break;
356e2385 2199
c18487ee 2200 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2201 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2202 break;
356e2385 2203
c18487ee 2204 default:
34f80b04 2205 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2206 ADVERTISED_Pause);
2207 break;
2208 }
2209}
f1410647 2210
2211static void bnx2x_link_report(struct bnx2x *bp)
2212{
f34d28ea 2213 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2214 netif_carrier_off(bp->dev);
7995c64e 2215 netdev_err(bp->dev, "NIC Link is Down\n");
2216 return;
2217 }
2218
c18487ee 2219 if (bp->link_vars.link_up) {
2220 u16 line_speed;
2221
c18487ee
YR
2222 if (bp->state == BNX2X_STATE_OPEN)
2223 netif_carrier_on(bp->dev);
7995c64e 2224 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2225
2226 line_speed = bp->link_vars.line_speed;
2227 if (IS_E1HMF(bp)) {
2228 u16 vn_max_rate;
2229
2230 vn_max_rate =
2231 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2232 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2233 if (vn_max_rate < line_speed)
2234 line_speed = vn_max_rate;
2235 }
7995c64e 2236 pr_cont("%d Mbps ", line_speed);
f1410647 2237
c18487ee 2238 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2239 pr_cont("full duplex");
c18487ee 2240 else
7995c64e 2241 pr_cont("half duplex");
f1410647 2242
2243 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2244 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2245 pr_cont(", receive ");
2246 if (bp->link_vars.flow_ctrl &
2247 BNX2X_FLOW_CTRL_TX)
7995c64e 2248 pr_cont("& transmit ");
c18487ee 2249 } else {
7995c64e 2250 pr_cont(", transmit ");
c18487ee 2251 }
7995c64e 2252 pr_cont("flow control ON");
c18487ee 2253 }
7995c64e 2254 pr_cont("\n");
f1410647 2255
2256 } else { /* link_down */
2257 netif_carrier_off(bp->dev);
7995c64e 2258 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2259 }
2260}
2261
b5bf9068 2262static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2263{
2264 if (!BP_NOMCP(bp)) {
2265 u8 rc;
a2fbb9ea 2266
19680c48 2267 /* Initialize link parameters structure variables */
2268 /* It is recommended to turn off RX FC for jumbo frames
2269 for better performance */
0c593270 2270 if (bp->dev->mtu > 5000)
c0700f90 2271 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2272 else
c0700f90 2273 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2274
4a37fb66 2275 bnx2x_acquire_phy_lock(bp);
2276
2277 if (load_mode == LOAD_DIAG)
2278 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2279
19680c48 2280 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2281
4a37fb66 2282 bnx2x_release_phy_lock(bp);
a2fbb9ea 2283
2284 bnx2x_calc_fc_adv(bp);
2285
2286 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2287 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2288 bnx2x_link_report(bp);
b5bf9068 2289 }
34f80b04 2290
2291 return rc;
2292 }
f5372251 2293 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2294 return -EINVAL;
2295}
2296
c18487ee 2297static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2298{
19680c48 2299 if (!BP_NOMCP(bp)) {
4a37fb66 2300 bnx2x_acquire_phy_lock(bp);
19680c48 2301 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2302 bnx2x_release_phy_lock(bp);
a2fbb9ea 2303
2304 bnx2x_calc_fc_adv(bp);
2305 } else
f5372251 2306 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2307}
a2fbb9ea 2308
2309static void bnx2x__link_reset(struct bnx2x *bp)
2310{
19680c48 2311 if (!BP_NOMCP(bp)) {
4a37fb66 2312 bnx2x_acquire_phy_lock(bp);
589abe3a 2313 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2314 bnx2x_release_phy_lock(bp);
19680c48 2315 } else
f5372251 2316 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2317}
a2fbb9ea 2318
2319static u8 bnx2x_link_test(struct bnx2x *bp)
2320{
2145a920 2321 u8 rc = 0;
a2fbb9ea 2322
2323 if (!BP_NOMCP(bp)) {
2324 bnx2x_acquire_phy_lock(bp);
2325 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2326 bnx2x_release_phy_lock(bp);
2327 } else
2328 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 2329
2330 return rc;
2331}
a2fbb9ea 2332
8a1c38d1 2333static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2334{
2335 u32 r_param = bp->link_vars.line_speed / 8;
2336 u32 fair_periodic_timeout_usec;
2337 u32 t_fair;
34f80b04 2338
2339 memset(&(bp->cmng.rs_vars), 0,
2340 sizeof(struct rate_shaping_vars_per_port));
2341 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2342
2343 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2344 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2345
 2346	/* this is the threshold below which no timer arming will occur;
 2347	   the 1.25 coefficient makes the threshold a little bigger
 2348	   than the real time, to compensate for timer inaccuracy */
2349 bp->cmng.rs_vars.rs_threshold =
2350 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2351
2352 /* resolution of fairness timer */
2353 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2354 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2355 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2356
2357 /* this is the threshold below which we won't arm the timer anymore */
2358 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2359
2360 /* we multiply by 1e3/8 to get bytes/msec.
2361 We don't want the credits to pass a credit
2362 of the t_fair*FAIR_MEM (algorithm resolution) */
2363 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2364 /* since each tick is 4 usec */
2365 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2366}
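
/*
 * Editor's worked example (assuming RS_PERIODIC_TIMEOUT_USEC == 100,
 * per the "100 usec in SDM ticks" comment above): on a 10G link,
 * r_param = 10000 / 8 = 1250 bytes/usec, so rs_threshold =
 * (100 * 1250 * 5) / 4 = 156250 bytes -- the real 125000-byte window
 * padded by the 1.25 timer-inaccuracy coefficient.
 */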
2367
2368/* Calculates the sum of vn_min_rates.
2369 It's needed for further normalizing of the min_rates.
2370 Returns:
2371 sum of vn_min_rates.
2372 or
2373 0 - if all the min_rates are 0.
 2374	   In the latter case the fairness algorithm should be deactivated.
2375 If not all min_rates are zero then those that are zeroes will be set to 1.
2376 */
2377static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2378{
2379 int all_zero = 1;
2380 int port = BP_PORT(bp);
2381 int vn;
2382
2383 bp->vn_weight_sum = 0;
2384 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2385 int func = 2*vn + port;
2386 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2387 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2388 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2389
2390 /* Skip hidden vns */
2391 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2392 continue;
2393
2394 /* If min rate is zero - set it to 1 */
2395 if (!vn_min_rate)
2396 vn_min_rate = DEF_MIN_RATE;
2397 else
2398 all_zero = 0;
2399
2400 bp->vn_weight_sum += vn_min_rate;
2401 }
2402
2403 /* ... only if all min rates are zeros - disable fairness */
2404 if (all_zero) {
2405 bp->cmng.flags.cmng_enables &=
2406 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 2407		DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
 2408		   " fairness will be disabled\n");
2409 } else
2410 bp->cmng.flags.cmng_enables |=
2411 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2412}
2413
8a1c38d1 2414static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2415{
2416 struct rate_shaping_vars_per_vn m_rs_vn;
2417 struct fairness_vars_per_vn m_fair_vn;
2418 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2419 u16 vn_min_rate, vn_max_rate;
2420 int i;
2421
2422 /* If function is hidden - set min and max to zeroes */
2423 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2424 vn_min_rate = 0;
2425 vn_max_rate = 0;
2426
2427 } else {
2428 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2429 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2430 /* If min rate is zero - set it to 1 */
2431 if (!vn_min_rate)
2432 vn_min_rate = DEF_MIN_RATE;
2433 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2434 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2435 }
8a1c38d1 2436 DP(NETIF_MSG_IFUP,
b015e3d1 2437 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2438 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2439
2440 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2441 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2442
2443 /* global vn counter - maximal Mbps for this vn */
2444 m_rs_vn.vn_counter.rate = vn_max_rate;
2445
2446 /* quota - number of bytes transmitted in this period */
2447 m_rs_vn.vn_counter.quota =
2448 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2449
8a1c38d1 2450 if (bp->vn_weight_sum) {
2451 /* credit for each period of the fairness algorithm:
2452 number of bytes in T_FAIR (the vn share the port rate).
2453 vn_weight_sum should not be larger than 10000, thus
2454 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2455 than zero */
34f80b04 2456 m_fair_vn.vn_credit_delta =
2457 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2458 (8 * bp->vn_weight_sum))),
2459 (bp->cmng.fair_vars.fair_threshold * 2));
2460 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2461 m_fair_vn.vn_credit_delta);
2462 }
2463
2464 /* Store it to internal memory */
2465 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2466 REG_WR(bp, BAR_XSTRORM_INTMEM +
2467 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2468 ((u32 *)(&m_rs_vn))[i]);
2469
2470 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2471 REG_WR(bp, BAR_XSTRORM_INTMEM +
2472 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2473 ((u32 *)(&m_fair_vn))[i]);
2474}
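
/*
 * Editor's worked example: for a function with vn_max_rate = 10000
 * (Mbps) and the 100 usec rate-shaping period assumed above, the
 * quota computes to (10000 * 100) / 8 = 125000 bytes per period.
 */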
2475
8a1c38d1 2476
2477/* This function is called upon link interrupt */
2478static void bnx2x_link_attn(struct bnx2x *bp)
2479{
d9e8b185 2480 u32 prev_link_status = bp->link_vars.link_status;
2481 /* Make sure that we are synced with the current statistics */
2482 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2483
c18487ee 2484 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2485
2486 if (bp->link_vars.link_up) {
2487
1c06328c 2488 /* dropless flow control */
a18f5128 2489 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2490 int port = BP_PORT(bp);
2491 u32 pause_enabled = 0;
2492
2493 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2494 pause_enabled = 1;
2495
2496 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2497 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2498 pause_enabled);
2499 }
2500
2501 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2502 struct host_port_stats *pstats;
2503
2504 pstats = bnx2x_sp(bp, port_stats);
2505 /* reset old bmac stats */
2506 memset(&(pstats->mac_stx[0]), 0,
2507 sizeof(struct mac_stx));
2508 }
f34d28ea 2509 if (bp->state == BNX2X_STATE_OPEN)
2510 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2511 }
2512
2513 /* indicate link status only if link status actually changed */
2514 if (prev_link_status != bp->link_vars.link_status)
2515 bnx2x_link_report(bp);
2516
2517 if (IS_E1HMF(bp)) {
8a1c38d1 2518 int port = BP_PORT(bp);
34f80b04 2519 int func;
8a1c38d1 2520 int vn;
34f80b04 2521
ab6ad5a4 2522 /* Set the attention towards other drivers on the same port */
2523 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2524 if (vn == BP_E1HVN(bp))
2525 continue;
2526
8a1c38d1 2527 func = ((vn << 1) | port);
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2529 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2530 }
34f80b04 2531
2532 if (bp->link_vars.link_up) {
2533 int i;
2534
2535 /* Init rate shaping and fairness contexts */
2536 bnx2x_init_port_minmax(bp);
34f80b04 2537
34f80b04 2538 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2539 bnx2x_init_vn_minmax(bp, 2*vn + port);
2540
2541 /* Store it to internal memory */
2542 for (i = 0;
2543 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2544 REG_WR(bp, BAR_XSTRORM_INTMEM +
2545 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2546 ((u32 *)(&bp->cmng))[i]);
2547 }
34f80b04 2548 }
c18487ee 2549}
a2fbb9ea 2550
c18487ee
YR
2551static void bnx2x__link_status_update(struct bnx2x *bp)
2552{
f34d28ea 2553 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2554 return;
a2fbb9ea 2555
c18487ee 2556 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2557
2558 if (bp->link_vars.link_up)
2559 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2560 else
2561 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2562
2563 bnx2x_calc_vn_weight_sum(bp);
2564
2565 /* indicate link status */
2566 bnx2x_link_report(bp);
a2fbb9ea 2567}
a2fbb9ea 2568
2569static void bnx2x_pmf_update(struct bnx2x *bp)
2570{
2571 int port = BP_PORT(bp);
2572 u32 val;
2573
2574 bp->port.pmf = 1;
2575 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2576
2577 /* enable nig attention */
2578 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2579 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2580 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2581
2582 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2583}
2584
c18487ee 2585/* end of Link */
2586
2587/* slow path */
2588
2589/*
2590 * General service functions
2591 */
2592
2593/* send the MCP a request, block until there is a reply */
2594u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2595{
2596 int func = BP_FUNC(bp);
2597 u32 seq = ++bp->fw_seq;
2598 u32 rc = 0;
2599 u32 cnt = 1;
2600 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2601
c4ff7cbf 2602 mutex_lock(&bp->fw_mb_mutex);
2603 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2604 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2605
2606 do {
 2607		/* let the FW do its magic ... */
2608 msleep(delay);
2609
2610 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2611
c4ff7cbf
EG
2612 /* Give the FW up to 5 second (500*10ms) */
2613 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2614
2615 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2616 cnt*delay, rc, seq);
2617
2618 /* is this a reply to our command? */
2619 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2620 rc &= FW_MSG_CODE_MASK;
2621 else {
2622 /* FW BUG! */
2623 BNX2X_ERR("FW failed to respond!\n");
2624 bnx2x_fw_dump(bp);
2625 rc = 0;
2626 }
c4ff7cbf 2627 mutex_unlock(&bp->fw_mb_mutex);
2628
2629 return rc;
2630}
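
/*
 * Editor's usage sketch: a zero return is this function's "no/invalid
 * MCP response" signal, so callers can check it, e.g.:
 *
 *	if (!bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK))
 *		BNX2X_ERR("MCP did not acknowledge the DCC event\n");
 */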
2631
e665bfda 2632static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2633static void bnx2x_set_rx_mode(struct net_device *dev);
2634
2635static void bnx2x_e1h_disable(struct bnx2x *bp)
2636{
2637 int port = BP_PORT(bp);
2638
2639 netif_tx_disable(bp->dev);
2640
2641 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2642
2643 netif_carrier_off(bp->dev);
2644}
2645
2646static void bnx2x_e1h_enable(struct bnx2x *bp)
2647{
2648 int port = BP_PORT(bp);
2649
2650 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2651
 2652	/* Only the Tx queues need to be re-enabled */
2653 netif_tx_wake_all_queues(bp->dev);
2654
2655 /*
2656 * Should not call netif_carrier_on since it will be called if the link
2657 * is up when checking for link state
2658 */
2659}
2660
2661static void bnx2x_update_min_max(struct bnx2x *bp)
2662{
2663 int port = BP_PORT(bp);
2664 int vn, i;
2665
2666 /* Init rate shaping and fairness contexts */
2667 bnx2x_init_port_minmax(bp);
2668
2669 bnx2x_calc_vn_weight_sum(bp);
2670
2671 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2672 bnx2x_init_vn_minmax(bp, 2*vn + port);
2673
2674 if (bp->port.pmf) {
2675 int func;
2676
2677 /* Set the attention towards other drivers on the same port */
2678 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2679 if (vn == BP_E1HVN(bp))
2680 continue;
2681
2682 func = ((vn << 1) | port);
2683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2684 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2685 }
2686
2687 /* Store it to internal memory */
2688 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2689 REG_WR(bp, BAR_XSTRORM_INTMEM +
2690 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2691 ((u32 *)(&bp->cmng))[i]);
2692 }
2693}
2694
2695static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2696{
2691d51d 2697 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2698
2699 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2700
2701 /*
2702 * This is the only place besides the function initialization
2703 * where the bp->flags can change so it is done without any
2704 * locks
2705 */
2706 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2707 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2708 bp->flags |= MF_FUNC_DIS;
2709
2710 bnx2x_e1h_disable(bp);
2711 } else {
2712 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2713 bp->flags &= ~MF_FUNC_DIS;
2714
2715 bnx2x_e1h_enable(bp);
2716 }
2717 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2718 }
2719 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2720
2721 bnx2x_update_min_max(bp);
2722 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2723 }
2724
2725 /* Report results to MCP */
2726 if (dcc_event)
2727 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2728 else
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2730}
2731
2732/* must be called under the spq lock */
2733static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2734{
2735 struct eth_spe *next_spe = bp->spq_prod_bd;
2736
2737 if (bp->spq_prod_bd == bp->spq_last_bd) {
2738 bp->spq_prod_bd = bp->spq;
2739 bp->spq_prod_idx = 0;
2740 DP(NETIF_MSG_TIMER, "end of spq\n");
2741 } else {
2742 bp->spq_prod_bd++;
2743 bp->spq_prod_idx++;
2744 }
2745 return next_spe;
2746}
2747
2748/* must be called under the spq lock */
2749static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2750{
2751 int func = BP_FUNC(bp);
2752
2753 /* Make sure that BD data is updated before writing the producer */
2754 wmb();
2755
2756 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2757 bp->spq_prod_idx);
2758 mmiowb();
2759}
2760
2761/* the slow path queue is odd since completions arrive on the fastpath ring */
2762static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2763 u32 data_hi, u32 data_lo, int common)
2764{
28912902 2765 struct eth_spe *spe;
a2fbb9ea 2766
2767#ifdef BNX2X_STOP_ON_ERROR
2768 if (unlikely(bp->panic))
2769 return -EIO;
2770#endif
2771
34f80b04 2772 spin_lock_bh(&bp->spq_lock);
2773
2774 if (!bp->spq_left) {
2775 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2776 spin_unlock_bh(&bp->spq_lock);
2777 bnx2x_panic();
2778 return -EBUSY;
2779 }
f1410647 2780
2781 spe = bnx2x_sp_get_next(bp);
2782
a2fbb9ea 2783	/* CID needs the port number to be encoded in it */
28912902 2784 spe->hdr.conn_and_cmd_data =
2785 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2786 HW_CID(bp, cid));
28912902 2787 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2788 if (common)
28912902 2789 spe->hdr.type |=
2790 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2791
2792 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2793 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2794
2795 bp->spq_left--;
2796
2797 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2798 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2799 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2800 (u32)(U64_LO(bp->spq_mapping) +
2801 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2802 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2803
28912902 2804 bnx2x_sp_prod_update(bp);
34f80b04 2805 spin_unlock_bh(&bp->spq_lock);
2806 return 0;
2807}
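
/*
 * Editor's usage sketch: a caller queues a ramrod by CID; the command
 * IDs come from the HSI, e.g. (hypothetical halt of connection 'cid',
 * mirroring calls made elsewhere in the driver):
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, cid, 0, 0, 0);
 */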
2808
2809/* acquire split MCP access lock register */
4a37fb66 2810static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2811{
72fd0718 2812 u32 j, val;
34f80b04 2813 int rc = 0;
2814
2815 might_sleep();
72fd0718 2816 for (j = 0; j < 1000; j++) {
2817 val = (1UL << 31);
2818 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2819 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2820 if (val & (1L << 31))
2821 break;
2822
2823 msleep(5);
2824 }
a2fbb9ea 2825 if (!(val & (1L << 31))) {
19680c48 2826 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2827 rc = -EBUSY;
2828 }
2829
2830 return rc;
2831}
2832
2833/* release split MCP access lock register */
2834static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2835{
72fd0718 2836 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2837}
2838
2839static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2840{
2841 struct host_def_status_block *def_sb = bp->def_status_blk;
2842 u16 rc = 0;
2843
2844 barrier(); /* status block is written to by the chip */
2845 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2846 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2847 rc |= 1;
2848 }
2849 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2850 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2851 rc |= 2;
2852 }
2853 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2854 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2855 rc |= 4;
2856 }
2857 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2858 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2859 rc |= 8;
2860 }
2861 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2862 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2863 rc |= 16;
2864 }
2865 return rc;
2866}
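
/*
 * Editor's note on the return mask: bit 0 means the attention-bits
 * index moved, and bits 1..4 mean the CSTORM, USTORM, XSTORM and
 * TSTORM default-SB indices moved, respectively; a zero return means
 * the default status block is unchanged.
 */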
2867
2868/*
2869 * slow path service functions
2870 */
2871
2872static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2873{
34f80b04 2874 int port = BP_PORT(bp);
2875 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2876 COMMAND_REG_ATTN_BITS_SET);
2877 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2878 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2879 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2880 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2881 u32 aeu_mask;
87942b46 2882 u32 nig_mask = 0;
a2fbb9ea 2883
2884 if (bp->attn_state & asserted)
2885 BNX2X_ERR("IGU ERROR\n");
2886
2887 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2888 aeu_mask = REG_RD(bp, aeu_addr);
2889
a2fbb9ea 2890 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2891 aeu_mask, asserted);
72fd0718 2892 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2893 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2894
2895 REG_WR(bp, aeu_addr, aeu_mask);
2896 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2897
3fcaf2e5 2898 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2899 bp->attn_state |= asserted;
3fcaf2e5 2900 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2901
2902 if (asserted & ATTN_HARD_WIRED_MASK) {
2903 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2904
2905 bnx2x_acquire_phy_lock(bp);
2906
877e9aa4 2907 /* save nig interrupt mask */
87942b46 2908 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2909 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2910
c18487ee 2911 bnx2x_link_attn(bp);
2912
2913 /* handle unicore attn? */
2914 }
2915 if (asserted & ATTN_SW_TIMER_4_FUNC)
2916 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2917
2918 if (asserted & GPIO_2_FUNC)
2919 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2920
2921 if (asserted & GPIO_3_FUNC)
2922 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2923
2924 if (asserted & GPIO_4_FUNC)
2925 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2926
2927 if (port == 0) {
2928 if (asserted & ATTN_GENERAL_ATTN_1) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2931 }
2932 if (asserted & ATTN_GENERAL_ATTN_2) {
2933 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2934 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2935 }
2936 if (asserted & ATTN_GENERAL_ATTN_3) {
2937 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2938 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2939 }
2940 } else {
2941 if (asserted & ATTN_GENERAL_ATTN_4) {
2942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2944 }
2945 if (asserted & ATTN_GENERAL_ATTN_5) {
2946 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2947 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2948 }
2949 if (asserted & ATTN_GENERAL_ATTN_6) {
2950 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2951 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2952 }
2953 }
2954
2955 } /* if hardwired */
2956
2957 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2958 asserted, hc_addr);
2959 REG_WR(bp, hc_addr, asserted);
2960
2961 /* now set back the mask */
a5e9a7cf 2962 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2963 REG_WR(bp, nig_int_mask_addr, nig_mask);
2964 bnx2x_release_phy_lock(bp);
2965 }
2966}
2967
2968static inline void bnx2x_fan_failure(struct bnx2x *bp)
2969{
2970 int port = BP_PORT(bp);
2971
2972 /* mark the failure */
2973 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2974 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2975 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2976 bp->link_params.ext_phy_config);
2977
2978 /* log the failure */
2979 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2980 " the driver to shutdown the card to prevent permanent"
2981 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2982}
ab6ad5a4 2983
877e9aa4 2984static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2985{
34f80b04 2986 int port = BP_PORT(bp);
877e9aa4 2987 int reg_offset;
4d295db0 2988 u32 val, swap_val, swap_override;
877e9aa4 2989
2990 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2991 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2992
34f80b04 2993 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2994
2995 val = REG_RD(bp, reg_offset);
2996 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2997 REG_WR(bp, reg_offset, val);
2998
2999 BNX2X_ERR("SPIO5 hw attention\n");
3000
fd4ef40d 3001 /* Fan failure attention */
35b19ba5
EG
3002 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
3003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 3004 /* Low power mode is controlled by GPIO 2 */
877e9aa4 3005 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 3006 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007 /* The PHY reset is controlled by GPIO 1 */
3008 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3009 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3010 break;
3011
3012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3013 /* The PHY reset is controlled by GPIO 1 */
3014 /* fake the port number to cancel the swap done in
3015 set_gpio() */
3016 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3017 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3018 port = (swap_val && swap_override) ^ 1;
3019 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3020 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3021 break;
3022
3023 default:
3024 break;
3025 }
fd4ef40d 3026 bnx2x_fan_failure(bp);
877e9aa4 3027 }
34f80b04 3028
589abe3a
EG
3029 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3030 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3031 bnx2x_acquire_phy_lock(bp);
3032 bnx2x_handle_module_detect_int(&bp->link_params);
3033 bnx2x_release_phy_lock(bp);
3034 }
3035
3036 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3037
3038 val = REG_RD(bp, reg_offset);
3039 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3040 REG_WR(bp, reg_offset, val);
3041
3042 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3043 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3044 bnx2x_panic();
3045 }
3046}
3047
3048static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3049{
3050 u32 val;
3051
0626b899 3052 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3053
3054 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3055 BNX2X_ERR("DB hw attention 0x%x\n", val);
3056 /* DORQ discard attention */
3057 if (val & 0x2)
3058 BNX2X_ERR("FATAL error from DORQ\n");
3059 }
3060
3061 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3062
3063 int port = BP_PORT(bp);
3064 int reg_offset;
3065
3066 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3067 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3068
3069 val = REG_RD(bp, reg_offset);
3070 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3071 REG_WR(bp, reg_offset, val);
3072
3073 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3074 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3075 bnx2x_panic();
3076 }
3077}
3078
3079static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3080{
3081 u32 val;
3082
3083 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3084
3085 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3086 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3087 /* CFC error attention */
3088 if (val & 0x2)
3089 BNX2X_ERR("FATAL error from CFC\n");
3090 }
3091
3092 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3093
3094 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3095 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3096 /* RQ_USDMDP_FIFO_OVERFLOW */
3097 if (val & 0x18000)
3098 BNX2X_ERR("FATAL error from PXP\n");
3099 }
3100
3101 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3102
3103 int port = BP_PORT(bp);
3104 int reg_offset;
3105
3106 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3107 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3108
3109 val = REG_RD(bp, reg_offset);
3110 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3111 REG_WR(bp, reg_offset, val);
3112
3113 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3114 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3115 bnx2x_panic();
3116 }
3117}
3118
3119static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3120{
3121 u32 val;
3122
3123 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3124
3125 if (attn & BNX2X_PMF_LINK_ASSERT) {
3126 int func = BP_FUNC(bp);
3127
3128 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3129 bp->mf_config = SHMEM_RD(bp,
3130 mf_cfg.func_mf_config[func].config);
3131 val = SHMEM_RD(bp, func_mb[func].drv_status);
3132 if (val & DRV_STATUS_DCC_EVENT_MASK)
3133 bnx2x_dcc_event(bp,
3134 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3135 bnx2x__link_status_update(bp);
2691d51d 3136 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3137 bnx2x_pmf_update(bp);
3138
3139 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3140
3141 BNX2X_ERR("MC assert!\n");
3142 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3144 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3145 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3146 bnx2x_panic();
3147
3148 } else if (attn & BNX2X_MCP_ASSERT) {
3149
3150 BNX2X_ERR("MCP assert!\n");
3151 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3152 bnx2x_fw_dump(bp);
3153
3154 } else
3155 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3156 }
3157
3158 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3159 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3160 if (attn & BNX2X_GRC_TIMEOUT) {
3161 val = CHIP_IS_E1H(bp) ?
3162 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3163 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3164 }
3165 if (attn & BNX2X_GRC_RSV) {
3166 val = CHIP_IS_E1H(bp) ?
3167 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3168 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3169 }
877e9aa4 3170 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3171 }
3172}
3173
3174static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3175static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3176
3177
3178#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3179#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3180#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3181#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3182#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3183#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3184/*
3185 * should be run under rtnl lock
3186 */
3187static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3188{
3189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3191 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3192 barrier();
3193 mmiowb();
3194}
3195
3196/*
3197 * should be run under rtnl lock
3198 */
3199static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3200{
3201 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3202 val |= (1 << 16);
3203 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3204 barrier();
3205 mmiowb();
3206}
3207
3208/*
3209 * should be run under rtnl lock
3210 */
3211static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3212{
3213 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3214 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3215 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3216}
3217
3218/*
3219 * should be run under rtnl lock
3220 */
3221static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3222{
3223 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3224
3225 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3226
3227 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3229 barrier();
3230 mmiowb();
3231}
3232
3233/*
3234 * should be run under rtnl lock
3235 */
3236static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3237{
3238 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3239
3240 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3241
3242 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3243 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3244 barrier();
3245 mmiowb();
3246
3247 return val1;
3248}
3249
3250/*
3251 * should be run under rtnl lock
3252 */
3253static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3254{
3255 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3256}
3257
3258static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3259{
3260 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3261 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3262}
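
/*
 * Editor's note: BNX2X_MISC_GEN_REG doubles as a tiny shared record --
 * bits [15:0] hold the per-chip load counter driven by the
 * inc/dec/clear helpers above, and bit 16 is the "reset in progress"
 * flag toggled by bnx2x_set_reset_in_progress()/bnx2x_set_reset_done().
 */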
3263
3264static inline void _print_next_block(int idx, const char *blk)
3265{
3266 if (idx)
3267 pr_cont(", ");
3268 pr_cont("%s", blk);
3269}
3270
3271static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3272{
3273 int i = 0;
3274 u32 cur_bit = 0;
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3278 switch (cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3280 _print_next_block(par_num++, "BRB");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3283 _print_next_block(par_num++, "PARSER");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "TSDM");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3289 _print_next_block(par_num++, "SEARCHER");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3292 _print_next_block(par_num++, "TSEMI");
3293 break;
3294 }
3295
3296 /* Clear the bit */
3297 sig &= ~cur_bit;
3298 }
3299 }
3300
3301 return par_num;
3302}
3303
3304static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3305{
3306 int i = 0;
3307 u32 cur_bit = 0;
3308 for (i = 0; sig; i++) {
3309 cur_bit = ((u32)0x1 << i);
3310 if (sig & cur_bit) {
3311 switch (cur_bit) {
3312 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3313 _print_next_block(par_num++, "PBCLIENT");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3316 _print_next_block(par_num++, "QM");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3319 _print_next_block(par_num++, "XSDM");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3322 _print_next_block(par_num++, "XSEMI");
3323 break;
3324 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3325 _print_next_block(par_num++, "DOORBELLQ");
3326 break;
3327 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3328 _print_next_block(par_num++, "VAUX PCI CORE");
3329 break;
3330 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3331 _print_next_block(par_num++, "DEBUG");
3332 break;
3333 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3334 _print_next_block(par_num++, "USDM");
3335 break;
3336 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3337 _print_next_block(par_num++, "USEMI");
3338 break;
3339 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3340 _print_next_block(par_num++, "UPB");
3341 break;
3342 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSDM");
3344 break;
3345 }
3346
3347 /* Clear the bit */
3348 sig &= ~cur_bit;
3349 }
3350 }
3351
3352 return par_num;
3353}
3354
3355static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3356{
3357 int i = 0;
3358 u32 cur_bit = 0;
3359 for (i = 0; sig; i++) {
3360 cur_bit = ((u32)0x1 << i);
3361 if (sig & cur_bit) {
3362 switch (cur_bit) {
3363 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3364 _print_next_block(par_num++, "CSEMI");
3365 break;
3366 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3367 _print_next_block(par_num++, "PXP");
3368 break;
3369 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3370 _print_next_block(par_num++,
3371 "PXPPCICLOCKCLIENT");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3374 _print_next_block(par_num++, "CFC");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3377 _print_next_block(par_num++, "CDU");
3378 break;
3379 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3380 _print_next_block(par_num++, "IGU");
3381 break;
3382 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3383 _print_next_block(par_num++, "MISC");
3384 break;
3385 }
3386
3387 /* Clear the bit */
3388 sig &= ~cur_bit;
3389 }
3390 }
3391
3392 return par_num;
3393}
3394
3395static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3396{
3397 int i = 0;
3398 u32 cur_bit = 0;
3399 for (i = 0; sig; i++) {
3400 cur_bit = ((u32)0x1 << i);
3401 if (sig & cur_bit) {
3402 switch (cur_bit) {
3403 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3404 _print_next_block(par_num++, "MCP ROM");
3405 break;
3406 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3407 _print_next_block(par_num++, "MCP UMP RX");
3408 break;
3409 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3410 _print_next_block(par_num++, "MCP UMP TX");
3411 break;
3412 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3413 _print_next_block(par_num++, "MCP SCPAD");
3414 break;
3415 }
3416
3417 /* Clear the bit */
3418 sig &= ~cur_bit;
3419 }
3420 }
3421
3422 return par_num;
3423}
3424
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}

static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
				 attn.sig[3]);
}

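/* Deassertion flow: take the ALR lock (the MCP or the other port may be
 * handling the same event), divert into the recovery flow on a parity
 * attention, otherwise dispatch the latched bits per dynamic attention
 * group, ack them in the HC and unmask them again in the AEU.
 */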
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle the attentions so
		 * that the other function will also "see" the parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

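/* A bit counts as newly asserted when it is set in attn_bits but neither
 * acked nor tracked in attn_state; it counts as deasserted when it is
 * clear in attn_bits but still acked and tracked.
 */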
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

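/* Slowpath worker: bit 0 of the returned status means HW attention,
 * bit 1 a CStorm STAT_QUERY event; every serviced bit is cleared so
 * whatever is left over is logged as unknown, then all default status
 * block indices are re-acked and interrupts re-enabled on the last ack.
 */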
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

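/* All statistics below are kept as {hi, lo} pairs of u32 rather than
 * u64.  ADD_64 propagates the low-word carry by hand: e.g. adding
 * {0, 1} to {0, 0xffffffff} wraps s_lo to 0, the (s_lo < a_lo) test
 * detects the wrap and bumps s_hi to 1.
 */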
/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

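/* Fold a {hi, lo} pair into one long: the full 64-bit value on 64-bit
 * builds, only the low 32 bits where long is 32 bits wide.
 */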
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

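/* Kick off the prepared statistics DMAE commands.  When more than one
 * command was built (bp->executer_idx != 0), a loader command is posted
 * that copies the prepared commands into the DMAE command memory, and
 * completions are chained through the per-channel GO registers so the
 * whole sequence runs without further CPU involvement.
 */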
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

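/* Build the PMF statistics DMAE program: host-to-GRC copies of the port
 * and function stats for the MCP, then GRC-to-host reads of the active
 * MAC (BMAC or EMAC register ranges) and of the NIG.  Only the final
 * command completes into stats_comp with DMAE_COMP_VAL; all the others
 * chain through the loader GO registers.
 */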
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

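/* In the UPDATE_STAT64() calls below mac_stx[0] holds the previous raw
 * MAC snapshot and mac_stx[1] accumulates the deltas, so mac_stx[1]
 * carries the running totals across MAC counter wrap-arounds.
 */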
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

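/* Consume the per-client statistics the storms DMA into fw_stats.  Each
 * storm stamps its block with the counter it saw in the STAT_QUERY
 * ramrod; a block whose counter does not line up with the driver's
 * current sequence is rejected as stale and the update is retried later.
 */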
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
					  " rx pkt(%lu) rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
					  " tx pkt(%lu) tx calls (%lu)"
					  " %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

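/* Statistics state machine: indexed by the current state (DISABLED or
 * ENABLED) and the incoming event (PMF change, link up, update tick,
 * stop); each entry gives the action to run and the next state.
 */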
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	if (unlikely(bp->panic))
		return;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

a2fbb9ea
ET
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

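/*
 * Illustrative sketch (hypothetical helper): the heartbeat check above
 * tolerates the MCP lagging the driver by exactly one sequence number,
 * with wrap-around handled by the mask -- when mcp_pulse holds the
 * mask's maximum value, (mcp_pulse + 1) & MCP_PULSE_SEQ_MASK wraps to
 * 0, so a drv_pulse of 0 still counts as in sync.
 */
static inline int bnx2x_pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}
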
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

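/*
 * Illustrative sketch (hypothetical helper; the unit relation is an
 * assumption inferred from the divisions above): bp->rx_ticks and
 * bp->tx_ticks carry the ethtool coalescing values in microseconds,
 * and the status-block timeout fields appear to count in units of
 * 4*BNX2X_BTR, so the conversion below mirrors the writes above.  A
 * converted value of 0 disables the index via the companion HC_DISABLE
 * write.
 */
static inline u8 bnx2x_hc_timeout_val(u32 usec)
{
	return usec / (4 * BNX2X_BTR);	/* 0 => host coalescing disabled */
}
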
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
					 cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

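/*
 * Illustrative note on the ring geometry used above: each descriptor
 * page reserves its final entries for a 64-bit "next page" pointer
 * (the last two eth_rx_bd slots, or the last eth_rx_cqe slot), which
 * is why the loops write to RX_DESC_CNT * i - 2 / RCQ_DESC_CNT * i - 1
 * and why the producers advance through NEXT_RX_IDX()/NEXT_RCQ_IDX()
 * instead of a plain increment.  Hypothetical helper sketching the
 * resulting capacity per BD page:
 */
static inline int bnx2x_usable_rx_bds_per_page(void)
{
	return RX_DESC_CNT - 2;	/* two slots host the next-page pointer */
}
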
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

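/*
 * Illustrative sketch (hypothetical helper): the loop above spreads the
 * TSTORM_INDIRECTION_TABLE_SIZE RSS hash buckets round-robin across the
 * active queues, so e.g. with 4 queues bucket i is served by client
 * (base cl_id) + (i % 4).
 */
static inline u8 bnx2x_rss_ind_entry(u8 base_cl_id, int bucket,
				     int num_queues)
{
	return base_cl_id + (bucket % num_queues);
}
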
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

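/*
 * Illustrative equivalent (sketch only; the error path for unknown
 * load codes is omitted here): the deliberate fall-through above makes
 * the amount of initialization monotone in the load code -- COMMON runs
 * common + port + function, PORT runs port + function, and FUNCTION
 * runs the function stage only.
 */
static inline void bnx2x_init_internal_expanded(struct bnx2x *bp,
						u32 load_code)
{
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
		bnx2x_init_internal_common(bp);
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON ||
	    load_code == FW_MSG_CODE_DRV_LOAD_PORT)
		bnx2x_init_internal_port(bp);
	bnx2x_init_internal_func(bp);
}
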
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " decompression\n");
	return -ENOMEM;
}

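/*
 * The function above follows the usual kernel unwind idiom: each
 * allocation gets a label that releases everything acquired before it,
 * and a failure jumps to the matching label.  Minimal sketch of the
 * same shape (hypothetical resources, not a driver code path):
 */
static inline int bnx2x_example_unwind(void **a, void **b)
{
	*a = kmalloc(64, GFP_KERNEL);
	if (*a == NULL)
		goto nomem1;

	*b = kmalloc(64, GFP_KERNEL);
	if (*b == NULL)
		goto nomem2;

	return 0;

nomem2:
	kfree(*a);
	*a = NULL;
nomem1:
	return -ENOMEM;
}
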
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
			   " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

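/*
 * Illustrative call sequence (hypothetical wrapper, abbreviated error
 * handling): decompress one gzip'ed firmware section into the
 * preallocated bp->gunzip_buf; note that bp->gunzip_outlen is left as
 * a count of 32-bit words, not bytes.
 */
static inline int bnx2x_example_gunzip_section(struct bnx2x *bp,
					       const u8 *zdata, int zlen)
{
	int rc = bnx2x_gunzip(bp, zdata, zlen);

	if (rc == 0)
		DP(NETIF_MSG_HW, "inflated %d dwords\n", bp->gunzip_outlen);
	return rc;
}
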
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

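/*
 * Control-word bits used by the debug-packet writes above (hypothetical
 * names; the values come from the SOP/EOP comments): each REG_WR_DMAE
 * pushes 8 bytes of frame data plus a control dword, so the two writes
 * emit one minimal 0x10-byte frame into the NIG loopback path.
 */
#define NIG_DBG_PKT_SOP		0x20	/* start of packet */
#define NIG_DBG_PKT_EOP		0x10	/* end of packet, eop_bvalid = 0 */
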
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: should the NIG statistics be reset here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

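/*
 * Illustrative sketch (hypothetical helper): PCI_EXP_DEVCTL encodes
 * Max_Payload_Size in bits 7:5 and Max_Read_Request_Size in bits 14:12
 * as powers of two times 128 bytes, which is what the >> 5 and >> 12
 * above extract before handing the orders to bnx2x_init_pxp_arb().
 */
static inline int bnx2x_pcie_order_to_bytes(int order)
{
	return 128 << order;	/* order 0..5 -> 128..4096 bytes */
}
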
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
6743 break;
6744
34f80b04
EG
6745 default:
6746 break;
6747 }
f1410647 6748
fd4ef40d
EG
6749 bnx2x_setup_fan_failure_detection(bp);
6750
34f80b04
EG
6751 /* clear PXP2 attentions */
6752 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6753
34f80b04 6754 enable_blocks_attention(bp);
72fd0718
VZ
6755 if (CHIP_PARITY_SUPPORTED(bp))
6756 enable_blocks_parity(bp);
a2fbb9ea 6757
6bbca910
YR
6758 if (!BP_NOMCP(bp)) {
6759 bnx2x_acquire_phy_lock(bp);
6760 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6761 bnx2x_release_phy_lock(bp);
6762 } else
6763 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6764
34f80b04
EG
6765 return 0;
6766}
a2fbb9ea 6767
34f80b04
EG
6768static int bnx2x_init_port(struct bnx2x *bp)
6769{
6770 int port = BP_PORT(bp);
94a78b79 6771 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6772 u32 low, high;
34f80b04 6773 u32 val;
a2fbb9ea 6774
cdaa7cb8 6775 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
6776
6777 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6778
94a78b79 6779 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6780 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6781
6782 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6783 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6784 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6785 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6786
37b091ba
MC
6787#ifdef BCM_CNIC
6788 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6789
94a78b79 6790 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6791 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6792 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6793#endif
cdaa7cb8 6794
94a78b79 6795 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6796
94a78b79 6797 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6798 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6799 /* no pause for emulation and FPGA */
6800 low = 0;
6801 high = 513;
6802 } else {
6803 if (IS_E1HMF(bp))
6804 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6805 else if (bp->dev->mtu > 4096) {
6806 if (bp->flags & ONE_PORT_FLAG)
6807 low = 160;
6808 else {
6809 val = bp->dev->mtu;
6810 /* (24*1024 + val*4)/256 */
6811 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6812 }
6813 } else
6814 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6815 high = low + 56; /* 14*1024/256 */
6816 }
6817 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6818 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
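/* Hedged sketch (illustrative only, not part of the driver): how the
 * BRB pause watermarks above are derived. Thresholds are counted in
 * 256-byte blocks; for a jumbo MTU on a two-port board the low mark is
 * (24*1024 + mtu*4)/256 rounded up, and the high mark always sits
 * 14*1024/256 = 56 blocks above the low one.
 */
static inline u32 brb_pause_low_example(u32 mtu)
{
	/* (24*1024 + mtu*4)/256 == 96 + mtu/64, rounded up */
	return 96 + (mtu / 64) + ((mtu % 64) ? 1 : 0);
}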
6819
6820
94a78b79 6821 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6822
94a78b79 6823 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6824 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6825 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6826 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6827
94a78b79
VZ
6828 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6829 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6830 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6831 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6832
94a78b79 6833 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6834 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6835
94a78b79 6836 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6837
6838 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6839 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6840
6841 /* update threshold */
34f80b04 6842 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6843 /* update init credit */
34f80b04 6844 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6845
6846 /* probe changes */
34f80b04 6847 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6848 msleep(5);
34f80b04 6849 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6850
37b091ba
MC
6851#ifdef BCM_CNIC
6852 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6853#endif
94a78b79 6854 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6855 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6856
6857 if (CHIP_IS_E1(bp)) {
6858 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6859 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6860 }
94a78b79 6861 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6862
94a78b79 6863 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6864 /* init aeu_mask_attn_func_0/1:
6865 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6866 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6867 * bits 4-7 are used for "per vn group attention" */
6868 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6869 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6870
94a78b79 6871 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6872 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6873 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6874 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6875 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6876
94a78b79 6877 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6878
6879 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6880
6881 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6882 /* 0x2 disable e1hov, 0x1 enable */
6883 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6884 (IS_E1HMF(bp) ? 0x1 : 0x2));
6885
1c06328c
EG
6886 {
6887 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6888 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6889 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6890 }
34f80b04
EG
6891 }
6892
94a78b79 6893 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6894 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6895
35b19ba5 6896 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6897 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6898 {
6899 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6900
6901 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6902 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6903
6904 /* The GPIO should be swapped if the swap register is
6905 set and active */
6906 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6908
6909 /* Select function upon port-swap configuration */
6910 if (port == 0) {
6911 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6912 aeu_gpio_mask = (swap_val && swap_override) ?
6913 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6914 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6915 } else {
6916 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6917 aeu_gpio_mask = (swap_val && swap_override) ?
6918 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6919 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6920 }
6921 val = REG_RD(bp, offset);
6922 /* add GPIO3 to group */
6923 val |= aeu_gpio_mask;
6924 REG_WR(bp, offset, val);
6925 }
6926 break;
6927
35b19ba5 6928 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6929 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6930 /* add SPIO 5 to group 0 */
4d295db0
EG
6931 {
6932 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6933 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6934 val = REG_RD(bp, reg_addr);
f1410647 6935 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6936 REG_WR(bp, reg_addr, val);
6937 }
f1410647
ET
6938 break;
6939
6940 default:
6941 break;
6942 }
6943
c18487ee 6944 bnx2x__link_reset(bp);
a2fbb9ea 6945
34f80b04
EG
6946 return 0;
6947}
6948
6949#define ILT_PER_FUNC (768/2)
6950#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6951/* the phys address is shifted right 12 bits and has a valid bit
6952 (1) added as the 53rd bit
6953 then since this is a wide register(TM)
6954 we split it into two 32 bit writes
6955 */
6956#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6957#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6958#define PXP_ONE_ILT(x) (((x) << 10) | x)
6959#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
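/* Hedged sketch (illustrative only, not part of the driver): how a
 * 64-bit DMA address is encoded by the macros above. The low ILT word
 * carries bits 12..43 of the address; the high word carries bits 44
 * and up, plus the valid bit at position 20 (the "53rd bit" of the
 * page-shifted value). PXP_ONE_ILT() simply encodes a range whose
 * first and last line are the same index.
 */
static inline void ilt_encode_example(u64 phys, u32 *lo, u32 *hi)
{
	*lo = ONCHIP_ADDR1(phys);	/* (phys >> 12) & 0xFFFFFFFF */
	*hi = ONCHIP_ADDR2(phys);	/* (1 << 20) | (phys >> 44) */
}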
6960
37b091ba
MC
6961#ifdef BCM_CNIC
6962#define CNIC_ILT_LINES 127
6963#define CNIC_CTX_PER_ILT 16
6964#else
34f80b04 6965#define CNIC_ILT_LINES 0
37b091ba 6966#endif
34f80b04
EG
6967
6968static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6969{
6970 int reg;
6971
6972 if (CHIP_IS_E1H(bp))
6973 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6974 else /* E1 */
6975 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6976
6977 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6978}
6979
6980static int bnx2x_init_func(struct bnx2x *bp)
6981{
6982 int port = BP_PORT(bp);
6983 int func = BP_FUNC(bp);
8badd27a 6984 u32 addr, val;
34f80b04
EG
6985 int i;
6986
cdaa7cb8 6987 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 6988
8badd27a
EG
6989 /* set MSI reconfigure capability */
6990 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6991 val = REG_RD(bp, addr);
6992 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6993 REG_WR(bp, addr, val);
6994
34f80b04
EG
6995 i = FUNC_ILT_BASE(func);
6996
6997 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6998 if (CHIP_IS_E1H(bp)) {
6999 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
7000 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
7001 } else /* E1 */
7002 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
7003 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
7004
37b091ba
MC
7005#ifdef BCM_CNIC
7006 i += 1 + CNIC_ILT_LINES;
7007 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
7008 if (CHIP_IS_E1(bp))
7009 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
7010 else {
7011 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7012 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7013 }
7014
7015 i++;
7016 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7017 if (CHIP_IS_E1(bp))
7018 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7019 else {
7020 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7021 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7022 }
7023
7024 i++;
7025 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7026 if (CHIP_IS_E1(bp))
7027 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7028 else {
7029 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7030 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7031 }
7032
7033 /* tell the searcher where the T2 table is */
7034 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7035
7036 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7037 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7038
7039 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7040 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7041 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7042
7043 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7044#endif
34f80b04
EG
7045
7046 if (CHIP_IS_E1H(bp)) {
573f2035
EG
7047 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7048 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7049 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7050 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7051 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7052 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7053 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7054 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7055 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
7056
7057 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7058 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7059 }
7060
7061 /* HC init per function */
7062 if (CHIP_IS_E1H(bp)) {
7063 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7064
7065 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7066 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7067 }
94a78b79 7068 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 7069
c14423fe 7070 /* Reset PCIE errors for debug */
a2fbb9ea
ET
7071 REG_WR(bp, 0x2114, 0xffffffff);
7072 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 7073
34f80b04
EG
7074 return 0;
7075}
7076
7077static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7078{
7079 int i, rc = 0;
a2fbb9ea 7080
34f80b04
EG
7081 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7082 BP_FUNC(bp), load_code);
a2fbb9ea 7083
34f80b04
EG
7084 bp->dmae_ready = 0;
7085 mutex_init(&bp->dmae_mutex);
54016b26
EG
7086 rc = bnx2x_gunzip_init(bp);
7087 if (rc)
7088 return rc;
a2fbb9ea 7089
34f80b04
EG
7090 switch (load_code) {
7091 case FW_MSG_CODE_DRV_LOAD_COMMON:
7092 rc = bnx2x_init_common(bp);
7093 if (rc)
7094 goto init_hw_err;
7095 /* no break */
7096
7097 case FW_MSG_CODE_DRV_LOAD_PORT:
7098 bp->dmae_ready = 1;
7099 rc = bnx2x_init_port(bp);
7100 if (rc)
7101 goto init_hw_err;
7102 /* no break */
7103
7104 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7105 bp->dmae_ready = 1;
7106 rc = bnx2x_init_func(bp);
7107 if (rc)
7108 goto init_hw_err;
7109 break;
7110
7111 default:
7112 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7113 break;
7114 }
7115
7116 if (!BP_NOMCP(bp)) {
7117 int func = BP_FUNC(bp);
a2fbb9ea
ET
7118
7119 bp->fw_drv_pulse_wr_seq =
34f80b04 7120 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 7121 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
7122 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7123 }
a2fbb9ea 7124
34f80b04
EG
7125 /* this needs to be done before gunzip end */
7126 bnx2x_zero_def_sb(bp);
7127 for_each_queue(bp, i)
7128 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
7129#ifdef BCM_CNIC
7130 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7131#endif
34f80b04
EG
7132
7133init_hw_err:
7134 bnx2x_gunzip_end(bp);
7135
7136 return rc;
a2fbb9ea
ET
7137}
7138
a2fbb9ea
ET
7139static void bnx2x_free_mem(struct bnx2x *bp)
7140{
7141
7142#define BNX2X_PCI_FREE(x, y, size) \
7143 do { \
7144 if (x) { \
1a983142 7145 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
7146 x = NULL; \
7147 y = 0; \
7148 } \
7149 } while (0)
7150
7151#define BNX2X_FREE(x) \
7152 do { \
7153 if (x) { \
7154 vfree(x); \
7155 x = NULL; \
7156 } \
7157 } while (0)
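	/* Note (reading of the macros above): both helpers NULL the
	 * pointer after freeing (and BNX2X_PCI_FREE also zeroes the DMA
	 * handle), which keeps bnx2x_free_mem() safe to call on a
	 * partially allocated bp, as the alloc_mem_err path in
	 * bnx2x_alloc_mem() relies on.
	 */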
7158
7159 int i;
7160
7161 /* fastpath */
555f6c78 7162 /* Common */
a2fbb9ea
ET
7163 for_each_queue(bp, i) {
7164
555f6c78 7165 /* status blocks */
a2fbb9ea
ET
7166 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7167 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7168 sizeof(struct host_status_block));
555f6c78
EG
7169 }
7170 /* Rx */
54b9ddaa 7171 for_each_queue(bp, i) {
a2fbb9ea 7172
555f6c78 7173 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7174 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7175 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7176 bnx2x_fp(bp, i, rx_desc_mapping),
7177 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7178
7179 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7180 bnx2x_fp(bp, i, rx_comp_mapping),
7181 sizeof(struct eth_fast_path_rx_cqe) *
7182 NUM_RCQ_BD);
a2fbb9ea 7183
7a9b2557 7184 /* SGE ring */
32626230 7185 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
7186 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7187 bnx2x_fp(bp, i, rx_sge_mapping),
7188 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7189 }
555f6c78 7190 /* Tx */
54b9ddaa 7191 for_each_queue(bp, i) {
555f6c78
EG
7192
7193 /* fastpath tx rings: tx_buf tx_desc */
7194 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7195 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7196 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7197 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7198 }
a2fbb9ea
ET
7199 /* end of fastpath */
7200
7201 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 7202 sizeof(struct host_def_status_block));
a2fbb9ea
ET
7203
7204 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 7205 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7206
37b091ba 7207#ifdef BCM_CNIC
a2fbb9ea
ET
7208 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7209 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7210 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7211 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
7212 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7213 sizeof(struct host_status_block));
a2fbb9ea 7214#endif
7a9b2557 7215 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
7216
7217#undef BNX2X_PCI_FREE
7218#undef BNX2X_FREE
7219}
7220
7221static int bnx2x_alloc_mem(struct bnx2x *bp)
7222{
7223
7224#define BNX2X_PCI_ALLOC(x, y, size) \
7225 do { \
1a983142 7226 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
a2fbb9ea
ET
7227 if (x == NULL) \
7228 goto alloc_mem_err; \
7229 memset(x, 0, size); \
7230 } while (0)
7231
7232#define BNX2X_ALLOC(x, size) \
7233 do { \
7234 x = vmalloc(size); \
7235 if (x == NULL) \
7236 goto alloc_mem_err; \
7237 memset(x, 0, size); \
7238 } while (0)
7239
7240 int i;
7241
7242 /* fastpath */
555f6c78 7243 /* Common */
a2fbb9ea
ET
7244 for_each_queue(bp, i) {
7245 bnx2x_fp(bp, i, bp) = bp;
7246
555f6c78 7247 /* status blocks */
a2fbb9ea
ET
7248 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7249 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7250 sizeof(struct host_status_block));
555f6c78
EG
7251 }
7252 /* Rx */
54b9ddaa 7253 for_each_queue(bp, i) {
a2fbb9ea 7254
555f6c78 7255 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7256 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7257 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7258 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7259 &bnx2x_fp(bp, i, rx_desc_mapping),
7260 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7261
7262 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7263 &bnx2x_fp(bp, i, rx_comp_mapping),
7264 sizeof(struct eth_fast_path_rx_cqe) *
7265 NUM_RCQ_BD);
7266
7a9b2557
VZ
7267 /* SGE ring */
7268 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7269 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7270 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7271 &bnx2x_fp(bp, i, rx_sge_mapping),
7272 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 7273 }
555f6c78 7274 /* Tx */
54b9ddaa 7275 for_each_queue(bp, i) {
555f6c78 7276
555f6c78
EG
7277 /* fastpath tx rings: tx_buf tx_desc */
7278 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7279 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7281 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7282 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7283 }
a2fbb9ea
ET
7284 /* end of fastpath */
7285
7286 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7287 sizeof(struct host_def_status_block));
7288
7289 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7290 sizeof(struct bnx2x_slowpath));
7291
37b091ba 7292#ifdef BCM_CNIC
a2fbb9ea
ET
7293 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7294
a2fbb9ea
ET
7295 /* allocate searcher T2 table
7296 we allocate 1/4 of alloc num for T2
7297 (which is not entered into the ILT) */
7298 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7299
37b091ba 7300 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 7301 for (i = 0; i < 16*1024; i += 64)
37b091ba 7302 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 7303
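	/* Illustrative reading of the loop above (not driver code): each
	 * 64-byte T2 element stores the DMA address of the next element
	 * in its last 8 bytes (offset 56), chaining the table into a
	 * free list that the searcher hardware can walk.
	 */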
37b091ba 7304 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
7305 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7306
7307 /* QM queues (128*MAX_CONN) */
7308 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
7309
7310 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7311 sizeof(struct host_status_block));
a2fbb9ea
ET
7312#endif
7313
7314 /* Slow path ring */
7315 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7316
7317 return 0;
7318
7319alloc_mem_err:
7320 bnx2x_free_mem(bp);
7321 return -ENOMEM;
7322
7323#undef BNX2X_PCI_ALLOC
7324#undef BNX2X_ALLOC
7325}
7326
7327static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7328{
7329 int i;
7330
54b9ddaa 7331 for_each_queue(bp, i) {
a2fbb9ea
ET
7332 struct bnx2x_fastpath *fp = &bp->fp[i];
7333
7334 u16 bd_cons = fp->tx_bd_cons;
7335 u16 sw_prod = fp->tx_pkt_prod;
7336 u16 sw_cons = fp->tx_pkt_cons;
7337
a2fbb9ea
ET
7338 while (sw_cons != sw_prod) {
7339 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7340 sw_cons++;
7341 }
7342 }
7343}
7344
7345static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7346{
7347 int i, j;
7348
54b9ddaa 7349 for_each_queue(bp, j) {
a2fbb9ea
ET
7350 struct bnx2x_fastpath *fp = &bp->fp[j];
7351
a2fbb9ea
ET
7352 for (i = 0; i < NUM_RX_BD; i++) {
7353 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7354 struct sk_buff *skb = rx_buf->skb;
7355
7356 if (skb == NULL)
7357 continue;
7358
1a983142
FT
7359 dma_unmap_single(&bp->pdev->dev,
7360 dma_unmap_addr(rx_buf, mapping),
7361 bp->rx_buf_size, DMA_FROM_DEVICE);
a2fbb9ea
ET
7362
7363 rx_buf->skb = NULL;
7364 dev_kfree_skb(skb);
7365 }
7a9b2557 7366 if (!fp->disable_tpa)
32626230
EG
7367 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7368 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 7369 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
7370 }
7371}
7372
7373static void bnx2x_free_skbs(struct bnx2x *bp)
7374{
7375 bnx2x_free_tx_skbs(bp);
7376 bnx2x_free_rx_skbs(bp);
7377}
7378
7379static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7380{
34f80b04 7381 int i, offset = 1;
a2fbb9ea
ET
7382
7383 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 7384 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
7385 bp->msix_table[0].vector);
7386
37b091ba
MC
7387#ifdef BCM_CNIC
7388 offset++;
7389#endif
a2fbb9ea 7390 for_each_queue(bp, i) {
c14423fe 7391 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 7392 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
7393 bnx2x_fp(bp, i, state));
7394
34f80b04 7395 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 7396 }
a2fbb9ea
ET
7397}
7398
6cbe5065 7399static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
a2fbb9ea 7400{
a2fbb9ea 7401 if (bp->flags & USING_MSIX_FLAG) {
6cbe5065
VZ
7402 if (!disable_only)
7403 bnx2x_free_msix_irqs(bp);
a2fbb9ea 7404 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
7405 bp->flags &= ~USING_MSIX_FLAG;
7406
8badd27a 7407 } else if (bp->flags & USING_MSI_FLAG) {
6cbe5065
VZ
7408 if (!disable_only)
7409 free_irq(bp->pdev->irq, bp->dev);
8badd27a
EG
7410 pci_disable_msi(bp->pdev);
7411 bp->flags &= ~USING_MSI_FLAG;
7412
6cbe5065 7413 } else if (!disable_only)
a2fbb9ea
ET
7414 free_irq(bp->pdev->irq, bp->dev);
7415}
7416
7417static int bnx2x_enable_msix(struct bnx2x *bp)
7418{
8badd27a
EG
7419 int i, rc, offset = 1;
7420 int igu_vec = 0;
a2fbb9ea 7421
8badd27a
EG
7422 bp->msix_table[0].entry = igu_vec;
7423 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 7424
37b091ba
MC
7425#ifdef BCM_CNIC
7426 igu_vec = BP_L_ID(bp) + offset;
7427 bp->msix_table[1].entry = igu_vec;
7428 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7429 offset++;
7430#endif
34f80b04 7431 for_each_queue(bp, i) {
8badd27a 7432 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7433 bp->msix_table[i + offset].entry = igu_vec;
7434 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7435 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7436 }
7437
34f80b04 7438 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7439 BNX2X_NUM_QUEUES(bp) + offset);
1ac218c8
VZ
7440
7441 /*
7442 * reconfigure number of tx/rx queues according to available
7443 * MSI-X vectors
7444 */
7445 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7446 /* vectors available for FP */
7447 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7448
7449 DP(NETIF_MSG_IFUP,
7450 "Trying to use less MSI-X vectors: %d\n", rc);
7451
7452 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7453
7454 if (rc) {
7455 DP(NETIF_MSG_IFUP,
7456 "MSI-X is not attainable rc %d\n", rc);
7457 return rc;
7458 }
7459
7460 bp->num_queues = min(bp->num_queues, fp_vec);
7461
7462 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7463 bp->num_queues);
7464 } else if (rc) {
8badd27a
EG
7465 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7466 return rc;
34f80b04 7467 }
8badd27a 7468
a2fbb9ea
ET
7469 bp->flags |= USING_MSIX_FLAG;
7470
7471 return 0;
a2fbb9ea
ET
7472}
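/* Note (hedged, based on the old pci_enable_msix() contract): a positive
 * return value is the number of vectors the platform could actually
 * allocate, so the code above retries with that count and then shrinks
 * bp->num_queues to the fastpath vectors that remain after the slowpath
 * (and CNIC) entries are reserved.
 */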
7473
a2fbb9ea
ET
7474static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7475{
34f80b04 7476 int i, rc, offset = 1;
a2fbb9ea 7477
a2fbb9ea
ET
7478 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7479 bp->dev->name, bp->dev);
a2fbb9ea
ET
7480 if (rc) {
7481 BNX2X_ERR("request sp irq failed\n");
7482 return -EBUSY;
7483 }
7484
37b091ba
MC
7485#ifdef BCM_CNIC
7486 offset++;
7487#endif
a2fbb9ea 7488 for_each_queue(bp, i) {
555f6c78 7489 struct bnx2x_fastpath *fp = &bp->fp[i];
54b9ddaa
VZ
7490 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7491 bp->dev->name, i);
ca00392c 7492
34f80b04 7493 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7494 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7495 if (rc) {
555f6c78 7496 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7497 bnx2x_free_msix_irqs(bp);
7498 return -EBUSY;
7499 }
7500
555f6c78 7501 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7502 }
7503
555f6c78 7504 i = BNX2X_NUM_QUEUES(bp);
cdaa7cb8
VZ
7505 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7506 " ... fp[%d] %d\n",
7507 bp->msix_table[0].vector,
7508 0, bp->msix_table[offset].vector,
7509 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7510
a2fbb9ea 7511 return 0;
a2fbb9ea
ET
7512}
7513
8badd27a
EG
7514static int bnx2x_enable_msi(struct bnx2x *bp)
7515{
7516 int rc;
7517
7518 rc = pci_enable_msi(bp->pdev);
7519 if (rc) {
7520 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7521 return -1;
7522 }
7523 bp->flags |= USING_MSI_FLAG;
7524
7525 return 0;
7526}
7527
a2fbb9ea
ET
7528static int bnx2x_req_irq(struct bnx2x *bp)
7529{
8badd27a 7530 unsigned long flags;
34f80b04 7531 int rc;
a2fbb9ea 7532
8badd27a
EG
7533 if (bp->flags & USING_MSI_FLAG)
7534 flags = 0;
7535 else
7536 flags = IRQF_SHARED;
7537
7538 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7539 bp->dev->name, bp->dev);
a2fbb9ea
ET
7540 if (!rc)
7541 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7542
7543 return rc;
a2fbb9ea
ET
7544}
7545
65abd74d
YG
7546static void bnx2x_napi_enable(struct bnx2x *bp)
7547{
7548 int i;
7549
54b9ddaa 7550 for_each_queue(bp, i)
65abd74d
YG
7551 napi_enable(&bnx2x_fp(bp, i, napi));
7552}
7553
7554static void bnx2x_napi_disable(struct bnx2x *bp)
7555{
7556 int i;
7557
54b9ddaa 7558 for_each_queue(bp, i)
65abd74d
YG
7559 napi_disable(&bnx2x_fp(bp, i, napi));
7560}
7561
7562static void bnx2x_netif_start(struct bnx2x *bp)
7563{
e1510706
EG
7564 int intr_sem;
7565
7566 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7567 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7568
7569 if (intr_sem) {
65abd74d 7570 if (netif_running(bp->dev)) {
65abd74d
YG
7571 bnx2x_napi_enable(bp);
7572 bnx2x_int_enable(bp);
555f6c78
EG
7573 if (bp->state == BNX2X_STATE_OPEN)
7574 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7575 }
7576 }
7577}
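/* Note (hedged reading): bp->intr_sem appears to act as a
 * disable-nesting count; atomic_dec_and_test() lets bnx2x_netif_start()
 * re-enable NAPI and interrupts only when it balances the matching
 * bnx2x_netif_stop().
 */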
7578
f8ef6e44 7579static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7580{
f8ef6e44 7581 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7582 bnx2x_napi_disable(bp);
762d5f6c 7583 netif_tx_disable(bp->dev);
65abd74d
YG
7584}
7585
a2fbb9ea
ET
7586/*
7587 * Init service functions
7588 */
7589
e665bfda
MC
7590/**
7591 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7592 *
7593 * @param bp driver descriptor
7594 * @param set set or clear an entry (1 or 0)
7595 * @param mac pointer to a buffer containing a MAC
7596 * @param cl_bit_vec bit vector of clients to register a MAC for
7597 * @param cam_offset offset in a CAM to use
7598 * @param with_bcast set broadcast MAC as well
7599 */
7600static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7601 u32 cl_bit_vec, u8 cam_offset,
7602 u8 with_bcast)
a2fbb9ea
ET
7603{
7604 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7605 int port = BP_PORT(bp);
a2fbb9ea
ET
7606
7607 /* CAM allocation
7608 * unicasts 0-31:port0 32-63:port1
7609 * multicast 64-127:port0 128-191:port1
7610 */
e665bfda
MC
7611 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7612 config->hdr.offset = cam_offset;
7613 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7614 config->hdr.reserved1 = 0;
7615
7616 /* primary MAC */
7617 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7618 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7619 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7620 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7621 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7622 swab16(*(u16 *)&mac[4]);
34f80b04 7623 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7624 if (set)
7625 config->config_table[0].target_table_entry.flags = 0;
7626 else
7627 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7628 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7629 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7630 config->config_table[0].target_table_entry.vlan_id = 0;
7631
3101c2bc
YG
7632 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7633 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7634 config->config_table[0].cam_entry.msb_mac_addr,
7635 config->config_table[0].cam_entry.middle_mac_addr,
7636 config->config_table[0].cam_entry.lsb_mac_addr);
7637
7638 /* broadcast */
e665bfda
MC
7639 if (with_bcast) {
7640 config->config_table[1].cam_entry.msb_mac_addr =
7641 cpu_to_le16(0xffff);
7642 config->config_table[1].cam_entry.middle_mac_addr =
7643 cpu_to_le16(0xffff);
7644 config->config_table[1].cam_entry.lsb_mac_addr =
7645 cpu_to_le16(0xffff);
7646 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7647 if (set)
7648 config->config_table[1].target_table_entry.flags =
7649 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7650 else
7651 CAM_INVALIDATE(config->config_table[1]);
7652 config->config_table[1].target_table_entry.clients_bit_vector =
7653 cpu_to_le32(cl_bit_vec);
7654 config->config_table[1].target_table_entry.vlan_id = 0;
7655 }
a2fbb9ea
ET
7656
7657 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7658 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7659 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7660}
7661
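/* Hedged sketch (illustrative only, not part of the driver): how a
 * 6-byte MAC is folded into the three 16-bit CAM words used above.
 * On a little-endian host, swab16() on each 16-bit chunk restores
 * network byte order, e.g. 00:11:22:33:44:55 becomes msb 0x0011,
 * middle 0x2233, lsb 0x4455.
 */
static inline void mac_to_cam_words_example(const u8 *mac,
					    u16 *msb, u16 *mid, u16 *lsb)
{
	*msb = swab16(*(const u16 *)&mac[0]);
	*mid = swab16(*(const u16 *)&mac[2]);
	*lsb = swab16(*(const u16 *)&mac[4]);
}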
e665bfda
MC
7662/**
7663 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7664 *
7665 * @param bp driver descriptor
7666 * @param set set or clear an entry (1 or 0)
7667 * @param mac pointer to a buffer containing a MAC
7668 * @param cl_bit_vec bit vector of clients to register a MAC for
7669 * @param cam_offset offset in a CAM to use
7670 */
7671static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7672 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7673{
7674 struct mac_configuration_cmd_e1h *config =
7675 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7676
8d9c5f34 7677 config->hdr.length = 1;
e665bfda
MC
7678 config->hdr.offset = cam_offset;
7679 config->hdr.client_id = 0xff;
34f80b04
EG
7680 config->hdr.reserved1 = 0;
7681
7682 /* primary MAC */
7683 config->config_table[0].msb_mac_addr =
e665bfda 7684 swab16(*(u16 *)&mac[0]);
34f80b04 7685 config->config_table[0].middle_mac_addr =
e665bfda 7686 swab16(*(u16 *)&mac[2]);
34f80b04 7687 config->config_table[0].lsb_mac_addr =
e665bfda 7688 swab16(*(u16 *)&mac[4]);
ca00392c 7689 config->config_table[0].clients_bit_vector =
e665bfda 7690 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7691 config->config_table[0].vlan_id = 0;
7692 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7693 if (set)
7694 config->config_table[0].flags = BP_PORT(bp);
7695 else
7696 config->config_table[0].flags =
7697 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7698
e665bfda 7699 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7700 (set ? "setting" : "clearing"),
34f80b04
EG
7701 config->config_table[0].msb_mac_addr,
7702 config->config_table[0].middle_mac_addr,
e665bfda 7703 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7704
7705 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7706 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7707 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7708}
7709
a2fbb9ea
ET
7710static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7711 int *state_p, int poll)
7712{
7713 /* can take a while if any port is running */
8b3a0f0b 7714 int cnt = 5000;
a2fbb9ea 7715
c14423fe
ET
7716 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7717 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7718
7719 might_sleep();
34f80b04 7720 while (cnt--) {
a2fbb9ea
ET
7721 if (poll) {
7722 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7723 /* if index is different from 0
7724 * the reply for some commands will
3101c2bc 7725 * be on the non default queue
a2fbb9ea
ET
7726 */
7727 if (idx)
7728 bnx2x_rx_int(&bp->fp[idx], 10);
7729 }
a2fbb9ea 7730
3101c2bc 7731 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7732 if (*state_p == state) {
7733#ifdef BNX2X_STOP_ON_ERROR
7734 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7735#endif
a2fbb9ea 7736 return 0;
8b3a0f0b 7737 }
a2fbb9ea 7738
a2fbb9ea 7739 msleep(1);
e3553b29
EG
7740
7741 if (bp->panic)
7742 return -EIO;
a2fbb9ea
ET
7743 }
7744
a2fbb9ea 7745 /* timeout! */
49d66772
ET
7746 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7747 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7748#ifdef BNX2X_STOP_ON_ERROR
7749 bnx2x_panic();
7750#endif
a2fbb9ea 7751
49d66772 7752 return -EBUSY;
a2fbb9ea
ET
7753}
7754
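/* Usage note (reading of the function above): callers bump a pending
 * counter (e.g. bp->set_mac_pending), post a ramrod and then pass the
 * counter as *state_p with state == 0; bnx2x_sp_event() updates it from
 * the completion path, and poll mode additionally drains the RX
 * completion ring by hand while interrupts may be off.
 */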
e665bfda
MC
7755static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7756{
7757 bp->set_mac_pending++;
7758 smp_wmb();
7759
7760 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7761 (1 << bp->fp->cl_id), BP_FUNC(bp));
7762
7763 /* Wait for a completion */
7764 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7765}
7766
7767static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7768{
7769 bp->set_mac_pending++;
7770 smp_wmb();
7771
7772 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7773 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7774 1);
7775
7776 /* Wait for a completion */
7777 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7778}
7779
993ac7b5
MC
7780#ifdef BCM_CNIC
7781/**
7782 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7783 * MAC(s). This function will wait until the ramrod completion
7784 * returns.
7785 *
7786 * @param bp driver handle
7787 * @param set set or clear the CAM entry
7788 *
7789 * @return 0 if success, -ENODEV if the ramrod doesn't return.
7790 */
7791static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7792{
7793 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7794
7795 bp->set_mac_pending++;
7796 smp_wmb();
7797
7798 /* Send a SET_MAC ramrod */
7799 if (CHIP_IS_E1(bp))
7800 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7801 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7802 1);
7803 else
7804 /* CAM allocation for E1H
7805 * unicasts: by func number
7806 * multicast: 20+FUNC*20, 20 each
7807 */
7808 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7809 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7810
7811 /* Wait for a completion when setting */
7812 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7813
7814 return 0;
7815}
7816#endif
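/* CAM layout note (hedged reading of the offsets above): on E1 the two
 * ETH entries (MAC + broadcast) occupy the first unicast slots of the
 * port, so the iSCSI MAC lands at (port ? 32 : 0) + 2; on E1H unicast
 * entries are indexed by function, with the iSCSI one placed past the
 * E1H_FUNC_MAX ETH entries.
 */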
7817
a2fbb9ea
ET
7818static int bnx2x_setup_leading(struct bnx2x *bp)
7819{
34f80b04 7820 int rc;
a2fbb9ea 7821
c14423fe 7822 /* reset IGU state */
34f80b04 7823 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7824
7825 /* SETUP ramrod */
7826 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7827
34f80b04
EG
7828 /* Wait for completion */
7829 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7830
34f80b04 7831 return rc;
a2fbb9ea
ET
7832}
7833
7834static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7835{
555f6c78
EG
7836 struct bnx2x_fastpath *fp = &bp->fp[index];
7837
a2fbb9ea 7838 /* reset IGU state */
555f6c78 7839 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7840
228241eb 7841 /* SETUP ramrod */
555f6c78
EG
7842 fp->state = BNX2X_FP_STATE_OPENING;
7843 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7844 fp->cl_id, 0);
a2fbb9ea
ET
7845
7846 /* Wait for completion */
7847 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7848 &(fp->state), 0);
a2fbb9ea
ET
7849}
7850
a2fbb9ea 7851static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7852
54b9ddaa 7853static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 7854{
ca00392c
EG
7855
7856 switch (bp->multi_mode) {
7857 case ETH_RSS_MODE_DISABLED:
54b9ddaa 7858 bp->num_queues = 1;
ca00392c
EG
7859 break;
7860
7861 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
7862 if (num_queues)
7863 bp->num_queues = min_t(u32, num_queues,
7864 BNX2X_MAX_QUEUES(bp));
ca00392c 7865 else
54b9ddaa
VZ
7866 bp->num_queues = min_t(u32, num_online_cpus(),
7867 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
7868 break;
7869
7870
7871 default:
54b9ddaa 7872 bp->num_queues = 1;
ca00392c
EG
7873 break;
7874 }
ca00392c
EG
7875}
7876
54b9ddaa 7877static int bnx2x_set_num_queues(struct bnx2x *bp)
a2fbb9ea 7878{
ca00392c 7879 int rc = 0;
a2fbb9ea 7880
8badd27a
EG
7881 switch (int_mode) {
7882 case INT_MODE_INTx:
7883 case INT_MODE_MSI:
54b9ddaa 7884 bp->num_queues = 1;
ca00392c 7885 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a 7886 break;
8badd27a 7887 default:
54b9ddaa
VZ
7888 /* Set number of queues according to bp->multi_mode value */
7889 bnx2x_set_num_queues_msix(bp);
ca00392c 7890
54b9ddaa
VZ
7891 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7892 bp->num_queues);
ca00392c 7893
2dfe0e1f
EG
7894 /* if we can't use MSI-X we only need one fp,
7895 * so try to enable MSI-X with the requested number of fp's
7896 * and fallback to MSI or legacy INTx with one fp
7897 */
ca00392c 7898 rc = bnx2x_enable_msix(bp);
54b9ddaa 7899 if (rc)
34f80b04 7900 /* failed to enable MSI-X */
54b9ddaa 7901 bp->num_queues = 1;
8badd27a 7902 break;
a2fbb9ea 7903 }
54b9ddaa 7904 bp->dev->real_num_tx_queues = bp->num_queues;
ca00392c 7905 return rc;
8badd27a
EG
7906}
7907
993ac7b5
MC
7908#ifdef BCM_CNIC
7909static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7910static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7911#endif
8badd27a
EG
7912
7913/* must be called with rtnl_lock */
7914static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7915{
7916 u32 load_code;
ca00392c
EG
7917 int i, rc;
7918
8badd27a 7919#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7920 if (unlikely(bp->panic))
7921 return -EPERM;
7922#endif
7923
7924 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7925
54b9ddaa 7926 rc = bnx2x_set_num_queues(bp);
c14423fe 7927
6cbe5065
VZ
7928 if (bnx2x_alloc_mem(bp)) {
7929 bnx2x_free_irq(bp, true);
a2fbb9ea 7930 return -ENOMEM;
6cbe5065 7931 }
a2fbb9ea 7932
54b9ddaa 7933 for_each_queue(bp, i)
7a9b2557
VZ
7934 bnx2x_fp(bp, i, disable_tpa) =
7935 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7936
54b9ddaa 7937 for_each_queue(bp, i)
2dfe0e1f
EG
7938 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7939 bnx2x_poll, 128);
7940
2dfe0e1f
EG
7941 bnx2x_napi_enable(bp);
7942
34f80b04
EG
7943 if (bp->flags & USING_MSIX_FLAG) {
7944 rc = bnx2x_req_msix_irqs(bp);
7945 if (rc) {
6cbe5065 7946 bnx2x_free_irq(bp, true);
2dfe0e1f 7947 goto load_error1;
34f80b04
EG
7948 }
7949 } else {
ca00392c 7950 /* Fall back to INTx if we failed to enable MSI-X due to
54b9ddaa 7951 lack of memory (in bnx2x_set_num_queues()) */
8badd27a
EG
7952 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7953 bnx2x_enable_msi(bp);
34f80b04
EG
7954 bnx2x_ack_int(bp);
7955 rc = bnx2x_req_irq(bp);
7956 if (rc) {
2dfe0e1f 7957 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6cbe5065 7958 bnx2x_free_irq(bp, true);
2dfe0e1f 7959 goto load_error1;
a2fbb9ea 7960 }
8badd27a
EG
7961 if (bp->flags & USING_MSI_FLAG) {
7962 bp->dev->irq = bp->pdev->irq;
7995c64e
JP
7963 netdev_info(bp->dev, "using MSI IRQ %d\n",
7964 bp->pdev->irq);
8badd27a 7965 }
a2fbb9ea
ET
7966 }
7967
2dfe0e1f
EG
7968 /* Send LOAD_REQUEST command to MCP
7969 Returns the type of LOAD command:
7970 if it is the first port to be initialized
7971 common blocks should be initialized, otherwise - not
7972 */
7973 if (!BP_NOMCP(bp)) {
7974 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7975 if (!load_code) {
7976 BNX2X_ERR("MCP response failure, aborting\n");
7977 rc = -EBUSY;
7978 goto load_error2;
7979 }
7980 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7981 rc = -EBUSY; /* other port in diagnostic mode */
7982 goto load_error2;
7983 }
7984
7985 } else {
7986 int port = BP_PORT(bp);
7987
f5372251 7988 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7989 load_count[0], load_count[1], load_count[2]);
7990 load_count[0]++;
7991 load_count[1 + port]++;
f5372251 7992 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7993 load_count[0], load_count[1], load_count[2]);
7994 if (load_count[0] == 1)
7995 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7996 else if (load_count[1 + port] == 1)
7997 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7998 else
7999 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
8000 }
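	/* Bookkeeping note (reading of the no-MCP path above):
	 * load_count[0] counts functions loaded on the whole chip and
	 * load_count[1 + port] per port, so the first function up
	 * initializes the common blocks, the first one on its port
	 * initializes the port, and any later one only initializes its
	 * own function.
	 */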
8001
8002 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8003 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8004 bp->port.pmf = 1;
8005 else
8006 bp->port.pmf = 0;
8007 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 8008
a2fbb9ea 8009 /* Initialize HW */
34f80b04
EG
8010 rc = bnx2x_init_hw(bp, load_code);
8011 if (rc) {
a2fbb9ea 8012 BNX2X_ERR("HW init failed, aborting\n");
f1e1a199
VZ
8013 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8014 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8015 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
2dfe0e1f 8016 goto load_error2;
a2fbb9ea
ET
8017 }
8018
a2fbb9ea 8019 /* Setup NIC internals and enable interrupts */
471de716 8020 bnx2x_nic_init(bp, load_code);
a2fbb9ea 8021
2691d51d
EG
8022 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8023 (bp->common.shmem2_base))
8024 SHMEM2_WR(bp, dcc_support,
8025 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8026 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8027
a2fbb9ea 8028 /* Send LOAD_DONE command to MCP */
34f80b04 8029 if (!BP_NOMCP(bp)) {
228241eb
ET
8030 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8031 if (!load_code) {
da5a662a 8032 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 8033 rc = -EBUSY;
2dfe0e1f 8034 goto load_error3;
a2fbb9ea
ET
8035 }
8036 }
8037
8038 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8039
34f80b04
EG
8040 rc = bnx2x_setup_leading(bp);
8041 if (rc) {
da5a662a 8042 BNX2X_ERR("Setup leading failed!\n");
e3553b29 8043#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 8044 goto load_error3;
e3553b29
EG
8045#else
8046 bp->panic = 1;
8047 return -EBUSY;
8048#endif
34f80b04 8049 }
a2fbb9ea 8050
34f80b04
EG
8051 if (CHIP_IS_E1H(bp))
8052 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 8053 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 8054 bp->flags |= MF_FUNC_DIS;
34f80b04 8055 }
a2fbb9ea 8056
ca00392c 8057 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
8058#ifdef BCM_CNIC
8059 /* Enable Timer scan */
8060 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8061#endif
34f80b04
EG
8062 for_each_nondefault_queue(bp, i) {
8063 rc = bnx2x_setup_multi(bp, i);
8064 if (rc)
37b091ba
MC
8065#ifdef BCM_CNIC
8066 goto load_error4;
8067#else
2dfe0e1f 8068 goto load_error3;
37b091ba 8069#endif
34f80b04 8070 }
a2fbb9ea 8071
ca00392c 8072 if (CHIP_IS_E1(bp))
e665bfda 8073 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 8074 else
e665bfda 8075 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
8076#ifdef BCM_CNIC
8077 /* Set iSCSI L2 MAC */
8078 mutex_lock(&bp->cnic_mutex);
8079 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8080 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8081 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
4a6e47a4
MC
8082 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8083 CNIC_SB_ID(bp));
993ac7b5
MC
8084 }
8085 mutex_unlock(&bp->cnic_mutex);
8086#endif
ca00392c 8087 }
34f80b04
EG
8088
8089 if (bp->port.pmf)
b5bf9068 8090 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
8091
8092 /* Start fast path */
34f80b04
EG
8093 switch (load_mode) {
8094 case LOAD_NORMAL:
ca00392c
EG
8095 if (bp->state == BNX2X_STATE_OPEN) {
8096 /* Tx queue should be only reenabled */
8097 netif_tx_wake_all_queues(bp->dev);
8098 }
2dfe0e1f 8099 /* Initialize the receive filter. */
34f80b04
EG
8100 bnx2x_set_rx_mode(bp->dev);
8101 break;
8102
8103 case LOAD_OPEN:
555f6c78 8104 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
8105 if (bp->state != BNX2X_STATE_OPEN)
8106 netif_tx_disable(bp->dev);
2dfe0e1f 8107 /* Initialize the receive filter. */
34f80b04 8108 bnx2x_set_rx_mode(bp->dev);
34f80b04 8109 break;
a2fbb9ea 8110
34f80b04 8111 case LOAD_DIAG:
2dfe0e1f 8112 /* Initialize the receive filter. */
a2fbb9ea 8113 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
8114 bp->state = BNX2X_STATE_DIAG;
8115 break;
8116
8117 default:
8118 break;
a2fbb9ea
ET
8119 }
8120
34f80b04
EG
8121 if (!bp->port.pmf)
8122 bnx2x__link_status_update(bp);
8123
a2fbb9ea
ET
8124 /* start the timer */
8125 mod_timer(&bp->timer, jiffies + bp->current_interval);
8126
993ac7b5
MC
8127#ifdef BCM_CNIC
8128 bnx2x_setup_cnic_irq_info(bp);
8129 if (bp->state == BNX2X_STATE_OPEN)
8130 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8131#endif
72fd0718 8132 bnx2x_inc_load_cnt(bp);
34f80b04 8133
a2fbb9ea
ET
8134 return 0;
8135
37b091ba
MC
8136#ifdef BCM_CNIC
8137load_error4:
8138 /* Disable Timer scan */
8139 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8140#endif
2dfe0e1f
EG
8141load_error3:
8142 bnx2x_int_disable_sync(bp, 1);
8143 if (!BP_NOMCP(bp)) {
8144 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8145 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8146 }
8147 bp->port.pmf = 0;
7a9b2557
VZ
8148 /* Free SKBs, SGEs, TPA pool and driver internals */
8149 bnx2x_free_skbs(bp);
54b9ddaa 8150 for_each_queue(bp, i)
3196a88a 8151 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 8152load_error2:
d1014634 8153 /* Release IRQs */
6cbe5065 8154 bnx2x_free_irq(bp, false);
2dfe0e1f
EG
8155load_error1:
8156 bnx2x_napi_disable(bp);
54b9ddaa 8157 for_each_queue(bp, i)
7cde1c8b 8158 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8159 bnx2x_free_mem(bp);
8160
34f80b04 8161 return rc;
a2fbb9ea
ET
8162}
8163
8164static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8165{
555f6c78 8166 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
8167 int rc;
8168
c14423fe 8169 /* halt the connection */
555f6c78
EG
8170 fp->state = BNX2X_FP_STATE_HALTING;
8171 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 8172
34f80b04 8173 /* Wait for completion */
a2fbb9ea 8174 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 8175 &(fp->state), 1);
c14423fe 8176 if (rc) /* timeout */
a2fbb9ea
ET
8177 return rc;
8178
8179 /* delete cfc entry */
8180 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8181
34f80b04
EG
8182 /* Wait for completion */
8183 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 8184 &(fp->state), 1);
34f80b04 8185 return rc;
a2fbb9ea
ET
8186}
8187
da5a662a 8188static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 8189{
4781bfad 8190 __le16 dsb_sp_prod_idx;
c14423fe 8191 /* if the other port is handling traffic,
a2fbb9ea 8192 this can take a lot of time */
34f80b04
EG
8193 int cnt = 500;
8194 int rc;
a2fbb9ea
ET
8195
8196 might_sleep();
8197
8198 /* Send HALT ramrod */
8199 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 8200 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 8201
34f80b04
EG
8202 /* Wait for completion */
8203 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8204 &(bp->fp[0].state), 1);
8205 if (rc) /* timeout */
da5a662a 8206 return rc;
a2fbb9ea 8207
49d66772 8208 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 8209
228241eb 8210 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
8211 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8212
49d66772 8213 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
8214 we are going to reset the chip anyway
8215 so there is not much to do if this times out
8216 */
34f80b04 8217 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
8218 if (!cnt) {
8219 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8220 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8221 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8222#ifdef BNX2X_STOP_ON_ERROR
8223 bnx2x_panic();
8224#endif
36e552ab 8225 rc = -EBUSY;
34f80b04
EG
8226 break;
8227 }
8228 cnt--;
da5a662a 8229 msleep(1);
5650d9d4 8230 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
8231 }
8232 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8233 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
8234
8235 return rc;
a2fbb9ea
ET
8236}
8237
34f80b04
EG
8238static void bnx2x_reset_func(struct bnx2x *bp)
8239{
8240 int port = BP_PORT(bp);
8241 int func = BP_FUNC(bp);
8242 int base, i;
8243
8244 /* Configure IGU */
8245 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8246 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8247
37b091ba
MC
8248#ifdef BCM_CNIC
8249 /* Disable Timer scan */
8250 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8251 /*
8252 * Wait for at least 10ms and up to 2 second for the timers scan to
8253 * complete
8254 */
8255 for (i = 0; i < 200; i++) {
8256 msleep(10);
8257 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8258 break;
8259 }
8260#endif
34f80b04
EG
8261 /* Clear ILT */
8262 base = FUNC_ILT_BASE(func);
8263 for (i = base; i < base + ILT_PER_FUNC; i++)
8264 bnx2x_ilt_wr(bp, i, 0);
8265}
8266
8267static void bnx2x_reset_port(struct bnx2x *bp)
8268{
8269 int port = BP_PORT(bp);
8270 u32 val;
8271
8272 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8273
8274 /* Do not rcv packets to BRB */
8275 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8276 /* Do not direct rcv packets that are not for MCP to the BRB */
8277 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8278 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8279
8280 /* Configure AEU */
8281 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8282
8283 msleep(100);
8284 /* Check for BRB port occupancy */
8285 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8286 if (val)
8287 DP(NETIF_MSG_IFDOWN,
33471629 8288 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
8289
8290 /* TODO: Close Doorbell port? */
8291}
8292
34f80b04
EG
8293static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8294{
8295 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8296 BP_FUNC(bp), reset_code);
8297
8298 switch (reset_code) {
8299 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8300 bnx2x_reset_port(bp);
8301 bnx2x_reset_func(bp);
8302 bnx2x_reset_common(bp);
8303 break;
8304
8305 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8306 bnx2x_reset_port(bp);
8307 bnx2x_reset_func(bp);
8308 break;
8309
8310 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8311 bnx2x_reset_func(bp);
8312 break;
49d66772 8313
34f80b04
EG
8314 default:
8315 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8316 break;
8317 }
8318}
8319
72fd0718 8320static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 8321{
da5a662a 8322 int port = BP_PORT(bp);
a2fbb9ea 8323 u32 reset_code = 0;
da5a662a 8324 int i, cnt, rc;
a2fbb9ea 8325
555f6c78 8326 /* Wait until tx fastpath tasks complete */
54b9ddaa 8327 for_each_queue(bp, i) {
228241eb
ET
8328 struct bnx2x_fastpath *fp = &bp->fp[i];
8329
34f80b04 8330 cnt = 1000;
e8b5fc51 8331 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 8332
7961f791 8333 bnx2x_tx_int(fp);
34f80b04
EG
8334 if (!cnt) {
8335 BNX2X_ERR("timeout waiting for queue[%d]\n",
8336 i);
8337#ifdef BNX2X_STOP_ON_ERROR
8338 bnx2x_panic();
8339 return;
8340#else
8341 break;
8342#endif
8343 }
8344 cnt--;
da5a662a 8345 msleep(1);
34f80b04 8346 }
228241eb 8347 }
da5a662a
VZ
8348 /* Give HW time to discard old tx messages */
8349 msleep(1);
a2fbb9ea 8350
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
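		/*
		 * e.g. for 00:11:22:33:44:55 the two writes above program
		 * MAC_MATCH[entry] = 0x00000011 and
		 * MAC_MATCH[entry + 4] = 0x22334455, i.e. the two most
		 * significant bytes go into the first (upper) register.
		 */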

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
}

static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable "close the gate" functionality if
	 * there is no parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
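
/*
 * Note the inverted sense of gate #3 above: the PXP "discard" bits are set
 * to close gates #2 and #4, while the HC config bit is set to open gate #3
 * (and cleared to close it).
 */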

#define SHARED_MF_CLP_MAGIC	0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp		driver handle
 * @param magic_val	Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp		driver handle
 * @param magic_val	Old value of the 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT	5000	/* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT	100	/* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp		driver handle
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}
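
/*
 * With the values above, bnx2x_reset_mcp_comp() below polls at most
 * MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 times, i.e. it waits up to ~5s on
 * real silicon and up to ~50s on emulation/FPGA (CHIP_REV_IS_SLOW).
 */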

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check the validity map of the last port.
		 * For now this checks port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}

static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
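
/*
 * Writing a bit to the _CLEAR register appears to assert reset for the
 * corresponding block, while writing it to the _SET register de-asserts it
 * (compare bnx2x_undi_unload(), which writes RST_NIG to the _SET register
 * to "take the NIG out of reset"). So the sequence above puts everything
 * except the not_reset_mask* blocks into reset, then releases the full
 * mask again.
 */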

static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there are"
		   " still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare for the chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
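
/*
 * The "process kill" sequence above is therefore: drain the PXP Tetris
 * buffer, close gates #2-#4, clear MISC_REG_UNPREPARED, save the MCP/CLP
 * and PXP state, hit the global reset, wait for the MCP to come back up,
 * and finally reopen the gates.
 */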

static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
						  HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If the interface has been removed - bail out */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure that the "is_leader" and "recovery_state"
			 * updates are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR "%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all functions
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								      PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock,
					 * since the former leader may have
					 * been unloaded by the user or have
					 * released leadership for some other
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart
						 * the switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
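
/*
 * Recovery state machine in short: every function starts in RECOVERY_INIT,
 * races for the RESERVED_08 HW lock to become the leader, and unloads
 * itself with UNLOAD_RECOVERY. In RECOVERY_WAIT the leader waits for the
 * global load count to drop to zero and then performs the "process kill",
 * while non-leaders wait for bnx2x_reset_is_done() and reload. Both roles
 * end in BNX2X_RECOVERY_DONE (or detach the device on failure).
 */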

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
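
/*
 * The pretend register seems to make subsequent GRC accesses from this PCI
 * function carry another function's attributes; pretending to be function 0
 * lets an E1H function disable the interrupts that a boot-time UNDI driver
 * left armed, as if the chip were in single-function (E1-like) mode.
 */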

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try to unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
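	/*
	 * e.g. chip num 0x164e, rev 0, metal 0x01, bond id 0 would yield
	 * chip_id = (0x164e << 16) | (0x0 << 12) | (0x01 << 4) | 0x0
	 *         = 0x164e0010.
	 */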

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
	    != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERROR("NVRAM config error. "
			    "BAD link speed link_config 0x%x\n",
			    bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
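
/*
 * e.g. mac_hi = 0x0011 and mac_lo = 0x22334455 produce the address
 * 00:11:22:33:44:55 in mac_buf: the cpu_to_be*() conversions make the
 * memcpy()s lay the bytes out most-significant first, regardless of
 * host endianness.
 */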

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4-lane XGXS RX and TX config */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
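	/*
	 * i.e. the default 50/25 usec coalescing values are rounded down
	 * to the nearest multiple of (4 * BNX2X_BTR) ticks.
	 */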

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported &
			      SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
10231
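/*
 * Editor's note (illustration, not from the original source): the register
 * dump exposed through "ethtool -d" is sized by bnx2x_get_regs_len() below
 * and filled by bnx2x_get_regs(); both walk the same reg_addrs/wreg_addrs
 * tables from bnx2x_dump.h, so the reported length and the dump contents
 * stay in sync by construction.
 */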
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

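/*
 * Editor's note (assumption, for illustration): bnx2x_get_drvinfo() below
 * reports firmware as "<fw_ver> bc <x>.<y>.<z>[ phy <phy_ver>]"; e.g. a
 * hypothetical "5.2.13 bc 4.2.0" would mean chip FW 5.2.13 with bootcode
 * 4.2.0, and the " phy ..." suffix appears only when this PCI function is
 * the PMF and the external PHY version could be read.
 */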
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

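/*
 * Editor's summary (illustration, not from the original source): the NVRAM
 * helpers below follow a fixed protocol: grab the per-port software
 * arbitration bit in MCP_REG_MCPR_NVM_SW_ARB, enable the interface via
 * MCP_REG_MCPR_NVM_ACCESS_ENABLE, issue dword-granular read/write commands,
 * then disable access and release the arbitration bit in reverse order.
 * Every access path (read, write, write1) brackets its work this way.
 */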
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

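/*
 * Usage sketch (editor's illustration, not part of the driver): a single
 * standalone dword read, e.g. of the magic word at flash offset 0, combines
 * the FIRST and LAST flags in one command:
 *
 *	__be32 magic;
 *	rc = bnx2x_nvram_read_dword(bp, 0, &magic,
 *				    MCPR_NVM_COMMAND_FIRST |
 *				    MCPR_NVM_COMMAND_LAST);
 *
 * This assumes the caller already holds the NVRAM lock and has enabled
 * access, exactly as bnx2x_nvram_read() below arranges.
 */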
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

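/*
 * Editor's worked example (illustration): BYTE_OFFSET() yields the bit
 * shift of a byte within its aligned dword, e.g. for offset 0x102:
 * 8 * (0x102 & 0x03) == 16.  bnx2x_nvram_write1() below uses it to mask
 * out and patch exactly one byte of the read-modify-written dword.
 */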
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

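/*
 * Editor's note (illustration): in bnx2x_nvram_write() below, the
 * FIRST/LAST command flags frame each flash page: LAST is raised on the
 * final dword of the buffer or of a NVRAM_PAGE_SIZE page, FIRST on the
 * dword that opens the next page.  E.g. with a (hypothetical) 256-byte
 * page, a 264-byte buffer goes out as FIRST, ..., LAST, FIRST, LAST.
 */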
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

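/*
 * Editor's note (assumption about intended usage): a PHY firmware upgrade
 * tool is expected to drive bnx2x_set_eeprom() below through its magic
 * cookies: 'PHYP' to take the link down and prepare the PHY, ordinary
 * writes within the 0x504859xx magic range for the image itself, the
 * 0x53985943 "upgrade completed" cookie, and finally 'PHYR' to re-init
 * the link.
 */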
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

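/*
 * Usage sketch (editor's illustration, hypothetical interface name): the
 * coalescing knobs above map directly onto ethtool, e.g.
 *
 *	ethtool -C eth0 rx-usecs 25 tx-usecs 50
 *
 * sets bp->rx_ticks/bp->tx_ticks (clamped to BNX2X_MAX_COALESCE_TOUT) and
 * reprograms the controller if the interface is up.
 */
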
static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
		return -EINVAL;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (data & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

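/*
 * Editor's note (illustration): the order of bnx2x_tests_str_arr above must
 * match the buf[] indices filled in bnx2x_self_test() further below:
 * buf[0] registers, buf[1] memory, buf[2] loopback, buf[3] nvram,
 * buf[4] interrupt, buf[5] link; buf[6] ("idle check") is only ever
 * reported as 0 there.
 */
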
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

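/*
 * Editor's note (illustration): the loopback frame built above is
 * self-describing: destination MAC set to the device's own address,
 * source MAC zeroed, the rest of the header filled with 0x77, and payload
 * byte i equal to (i & 0xff), so the Rx side can verify it byte-for-byte
 * with no shared state beyond pkt_size.
 */
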
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (BP_NOMCP(bp))
		return rc;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

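/*
 * Editor's note (background, for clarity): 0xdebb20e3 is the well-known
 * CRC-32 residue; running the CRC over a block that already carries its
 * own trailing CRC yields this constant.  bnx2x_test_nvram() below relies
 * on it to validate each nvram_tbl[] region without parsing the stored
 * CRC itself.
 */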
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	if (BP_NOMCP(bp))
		return 0;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

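/*
 * Usage sketch (editor's illustration, hypothetical interface name):
 *
 *	ethtool -t eth0 offline
 *
 * runs everything above (the register, memory and loopback tests bounce
 * the NIC through LOAD_DIAG), while "ethtool -t eth0 online" is limited
 * to the nvram, interrupt and link tests.
 */
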
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%d]: tx_bcast_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

11796
a2fbb9ea
ET
11797static void bnx2x_get_ethtool_stats(struct net_device *dev,
11798 struct ethtool_stats *stats, u64 *buf)
11799{
11800 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
11801 u32 *hw_stats, *offset;
11802 int i, j, k;
bb2a0f7a 11803
de832a55
EG
11804 if (is_multi(bp)) {
11805 k = 0;
54b9ddaa 11806 for_each_queue(bp, i) {
de832a55
EG
11807 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11808 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11809 if (bnx2x_q_stats_arr[j].size == 0) {
11810 /* skip this counter */
11811 buf[k + j] = 0;
11812 continue;
11813 }
11814 offset = (hw_stats +
11815 bnx2x_q_stats_arr[j].offset);
11816 if (bnx2x_q_stats_arr[j].size == 4) {
11817 /* 4-byte counter */
11818 buf[k + j] = (u64) *offset;
11819 continue;
11820 }
11821 /* 8-byte counter */
11822 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11823 }
11824 k += BNX2X_NUM_Q_STATS;
11825 }
11826 if (IS_E1HMF_MODE_STAT(bp))
11827 return;
11828 hw_stats = (u32 *)&bp->eth_stats;
11829 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11830 if (bnx2x_stats_arr[j].size == 0) {
11831 /* skip this counter */
11832 buf[k + j] = 0;
11833 continue;
11834 }
11835 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11836 if (bnx2x_stats_arr[j].size == 4) {
11837 /* 4-byte counter */
11838 buf[k + j] = (u64) *offset;
11839 continue;
11840 }
11841 /* 8-byte counter */
11842 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 11843 }
de832a55
EG
11844 } else {
11845 hw_stats = (u32 *)&bp->eth_stats;
11846 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11847 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11848 continue;
11849 if (bnx2x_stats_arr[i].size == 0) {
11850 /* skip this counter */
11851 buf[j] = 0;
11852 j++;
11853 continue;
11854 }
11855 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11856 if (bnx2x_stats_arr[i].size == 4) {
11857 /* 4-byte counter */
11858 buf[j] = (u64) *offset;
11859 j++;
11860 continue;
11861 }
11862 /* 8-byte counter */
11863 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 11864 j++;
a2fbb9ea 11865 }
a2fbb9ea
ET
11866 }
11867}
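/* Editor's sketch of the readout pattern above, assuming only the layout
 * the stats arrays imply: an 8-byte counter is kept as two consecutive
 * u32 words, high word first, and HILO_U64() glues them back into a u64,
 * while a 4-byte counter is read as a single word.  Standalone userspace
 * code; hilo_u64() and the demo values are illustrative, not driver API.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* e.g. pause_frames_sent_hi/_lo as adjacent words in eth_stats */
	uint32_t words[2] = { 0x00000001, 0x00000010 };

	/* size == 4 would read words[0] alone; size == 8 combines both */
	printf("%llu\n", (unsigned long long)hilo_u64(words[0], words[1]));
	return 0;	/* prints 4294967312 */
}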
11868
11869static int bnx2x_phys_id(struct net_device *dev, u32 data)
11870{
11871 struct bnx2x *bp = netdev_priv(dev);
11872 int i;
11873
34f80b04
EG
11874 if (!netif_running(dev))
11875 return 0;
11876
11877 if (!bp->port.pmf)
11878 return 0;
11879
a2fbb9ea
ET
11880 if (data == 0)
11881 data = 2;
11882
11883 for (i = 0; i < (data * 2); i++) {
c18487ee 11884 if ((i % 2) == 0)
7846e471
YR
11885 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11886 SPEED_1000);
c18487ee 11887 else
7846e471 11888 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
c18487ee 11889
a2fbb9ea
ET
11890 msleep_interruptible(500);
11891 if (signal_pending(current))
11892 break;
11893 }
11894
c18487ee 11895 if (bp->link_vars.link_up)
7846e471
YR
11896 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11897 bp->link_vars.line_speed);
a2fbb9ea
ET
11898
11899 return 0;
11900}
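/* Editor's note on the loop above: "data" comes from ethtool -p and is
 * the requested blink time in seconds (defaulted to 2 when 0 is passed);
 * each iteration covers half a second, so data * 2 iterations alternate
 * the LED between 1G operational mode and off for roughly that long,
 * bailing out early if the user interrupts the sleep.
 */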
11901
0fc0b732 11902static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
11903 .get_settings = bnx2x_get_settings,
11904 .set_settings = bnx2x_set_settings,
11905 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
11906 .get_regs_len = bnx2x_get_regs_len,
11907 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
11908 .get_wol = bnx2x_get_wol,
11909 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
11910 .get_msglevel = bnx2x_get_msglevel,
11911 .set_msglevel = bnx2x_set_msglevel,
11912 .nway_reset = bnx2x_nway_reset,
01e53298 11913 .get_link = bnx2x_get_link,
7a9b2557
VZ
11914 .get_eeprom_len = bnx2x_get_eeprom_len,
11915 .get_eeprom = bnx2x_get_eeprom,
11916 .set_eeprom = bnx2x_set_eeprom,
11917 .get_coalesce = bnx2x_get_coalesce,
11918 .set_coalesce = bnx2x_set_coalesce,
11919 .get_ringparam = bnx2x_get_ringparam,
11920 .set_ringparam = bnx2x_set_ringparam,
11921 .get_pauseparam = bnx2x_get_pauseparam,
11922 .set_pauseparam = bnx2x_set_pauseparam,
11923 .get_rx_csum = bnx2x_get_rx_csum,
11924 .set_rx_csum = bnx2x_set_rx_csum,
11925 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 11926 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
11927 .set_flags = bnx2x_set_flags,
11928 .get_flags = ethtool_op_get_flags,
11929 .get_sg = ethtool_op_get_sg,
11930 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
11931 .get_tso = ethtool_op_get_tso,
11932 .set_tso = bnx2x_set_tso,
7a9b2557 11933 .self_test = bnx2x_self_test,
15f0a394 11934 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 11935 .get_strings = bnx2x_get_strings,
a2fbb9ea 11936 .phys_id = bnx2x_phys_id,
bb2a0f7a 11937 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
11938};
11939
11940/* end of ethtool_ops */
11941
11942/****************************************************************************
11943* General service functions
11944****************************************************************************/
11945
11946static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11947{
11948 u16 pmcsr;
11949
11950 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11951
11952 switch (state) {
11953 case PCI_D0:
34f80b04 11954 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
11955 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11956 PCI_PM_CTRL_PME_STATUS));
11957
11958 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 11959 /* delay required during transition out of D3hot */
a2fbb9ea 11960 msleep(20);
34f80b04 11961 break;
a2fbb9ea 11962
34f80b04 11963 case PCI_D3hot:
d3dbfee0
VZ
11964 /* If there are other clients above, don't
11965 shut down the power */
11966 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11967 return 0;
11968 /* Don't shut down the power for emulation and FPGA */
11969 if (CHIP_REV_IS_SLOW(bp))
11970 return 0;
11971
34f80b04
EG
11972 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11973 pmcsr |= 3;
a2fbb9ea 11974
34f80b04
EG
11975 if (bp->wol)
11976 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 11977
34f80b04
EG
11978 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11979 pmcsr);
a2fbb9ea 11980
34f80b04
EG
11981 /* No more memory access after this point until
11982 * device is brought back to D0.
11983 */
11984 break;
11985
11986 default:
11987 return -EINVAL;
11988 }
11989 return 0;
a2fbb9ea
ET
11990}
11991
237907c1
EG
11992static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11993{
11994 u16 rx_cons_sb;
11995
11996 /* Tell compiler that status block fields can change */
11997 barrier();
11998 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11999 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
12000 rx_cons_sb++;
12001 return (fp->rx_comp_cons != rx_cons_sb);
12002}
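/* Editor's sketch of the boundary skip above, assuming what the ring
 * layout implies: the last slot of each RCQ page holds a next-page
 * pointer rather than a completion, so a consumer index landing on it
 * is stepped past.  Standalone model; 7 stands in for MAX_RCQ_DESC_CNT.
 */
#include <stdint.h>

#define DEMO_RCQ_DESC_CNT 7	/* 8 slots per page, last one is a link */

static uint16_t demo_next_rx_cons(uint16_t cons_sb)
{
	if ((cons_sb & DEMO_RCQ_DESC_CNT) == DEMO_RCQ_DESC_CNT)
		cons_sb++;	/* skip the next-page slot */
	return cons_sb;
}
/* demo_next_rx_cons(6) == 6, demo_next_rx_cons(7) == 8 */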
12003
34f80b04
EG
12004/*
12005 * net_device service functions
12006 */
12007
a2fbb9ea
ET
12008static int bnx2x_poll(struct napi_struct *napi, int budget)
12009{
54b9ddaa 12010 int work_done = 0;
a2fbb9ea
ET
12011 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
12012 napi);
12013 struct bnx2x *bp = fp->bp;
a2fbb9ea 12014
54b9ddaa 12015 while (1) {
a2fbb9ea 12016#ifdef BNX2X_STOP_ON_ERROR
54b9ddaa
VZ
12017 if (unlikely(bp->panic)) {
12018 napi_complete(napi);
12019 return 0;
12020 }
a2fbb9ea
ET
12021#endif
12022
54b9ddaa
VZ
12023 if (bnx2x_has_tx_work(fp))
12024 bnx2x_tx_int(fp);
356e2385 12025
54b9ddaa
VZ
12026 if (bnx2x_has_rx_work(fp)) {
12027 work_done += bnx2x_rx_int(fp, budget - work_done);
a2fbb9ea 12028
54b9ddaa
VZ
12029 /* must not complete if we consumed full budget */
12030 if (work_done >= budget)
12031 break;
12032 }
a2fbb9ea 12033
54b9ddaa
VZ
12034 /* Fall out from the NAPI loop if needed */
12035 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12036 bnx2x_update_fpsb_idx(fp);
12037 /* bnx2x_has_rx_work() reads the status block, thus we need
12038 * to ensure that status block indices have been actually read
12039 * (bnx2x_update_fpsb_idx) prior to this check
12040 * (bnx2x_has_rx_work) so that we won't write the "newer"
12041 * value of the status block to IGU (if there was a DMA right
12042 * after bnx2x_has_rx_work and if there is no rmb, the memory
12043 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12044 * before bnx2x_ack_sb). In this case there will never be
12045 * another interrupt until there is another update of the
12046 * status block, while there is still unhandled work.
12047 */
12048 rmb();
a2fbb9ea 12049
54b9ddaa
VZ
12050 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12051 napi_complete(napi);
12052 /* Re-enable interrupts */
12053 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12054 le16_to_cpu(fp->fp_c_idx),
12055 IGU_INT_NOP, 1);
12056 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12057 le16_to_cpu(fp->fp_u_idx),
12058 IGU_INT_ENABLE, 1);
12059 break;
12060 }
12061 }
a2fbb9ea 12062 }
356e2385 12063
a2fbb9ea
ET
12064 return work_done;
12065}
12066
755735eb
EG
12067
12068/* we split the first BD into a header BD and a data BD
33471629 12069 * to ease the pain of our fellow microcode engineers;
755735eb
EG
12070 * we use one mapping for both BDs.
12071 * So far this split has only been observed to be needed
12072 * in Other Operating Systems(TM).
12073 */
12074static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12075 struct bnx2x_fastpath *fp,
ca00392c
EG
12076 struct sw_tx_bd *tx_buf,
12077 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
12078 u16 bd_prod, int nbd)
12079{
ca00392c 12080 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
12081 struct eth_tx_bd *d_tx_bd;
12082 dma_addr_t mapping;
12083 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12084
12085 /* first fix first BD */
12086 h_tx_bd->nbd = cpu_to_le16(nbd);
12087 h_tx_bd->nbytes = cpu_to_le16(hlen);
12088
12089 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12090 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12091 h_tx_bd->addr_lo, h_tx_bd->nbd);
12092
12093 /* now get a new data BD
12094 * (after the pbd) and fill it */
12095 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 12096 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
12097
12098 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12099 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12100
12101 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12102 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12103 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
12104
12105 /* this marks the BD as one that has no individual mapping */
12106 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12107
755735eb
EG
12108 DP(NETIF_MSG_TX_QUEUED,
12109 "TSO split data size is %d (%x:%x)\n",
12110 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12111
ca00392c
EG
12112 /* update tx_bd */
12113 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
12114
12115 return bd_prod;
12116}
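/* Worked example of the split above (editor's illustration): for a
 * 200-byte start BD with hlen = 66 bytes of headers, the start BD is
 * trimmed to nbytes = 66, a fresh data BD reuses the same DMA mapping
 * at offset +66 with nbytes = 200 - 66 = 134, nbd on the start BD
 * counts the extra descriptor, and BNX2X_TSO_SPLIT_BD marks the buffer
 * so the completion path does not unmap the second BD separately.
 */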
12117
12118static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12119{
12120 if (fix > 0)
12121 csum = (u16) ~csum_fold(csum_sub(csum,
12122 csum_partial(t_header - fix, fix, 0)));
12123
12124 else if (fix < 0)
12125 csum = (u16) ~csum_fold(csum_add(csum,
12126 csum_partial(t_header, -fix, 0)));
12127
12128 return swab16(csum);
12129}
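/* Editor's sketch of the algebra behind the fixup above, assuming only
 * standard Internet-checksum arithmetic: a sum over extra leading bytes
 * can be removed by a one's-complement subtraction, which is what
 * csum_sub() does in the kernel.  sum16() is a simplified stand-in for
 * csum_partial(); fix is kept even here to sidestep odd-offset byte
 * swapping that the real helpers handle.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t sum16(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum += (i & 1) ? p[i] : (uint32_t)p[i] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t csum_sub16(uint16_t a, uint16_t b)
{
	uint32_t res = a + (uint16_t)~b;	/* one's-complement a - b */

	return (uint16_t)((res & 0xffff) + (res >> 16));
}

int main(void)
{
	uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t fix = 2;	/* HW summed two bytes too many (fix > 0 case) */

	/* subtracting the leading bytes' sum recovers the narrower sum */
	assert(csum_sub16(sum16(buf, 8), sum16(buf, fix)) ==
	       sum16(buf + fix, 8 - fix));
	return 0;
}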
12130
12131static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12132{
12133 u32 rc;
12134
12135 if (skb->ip_summed != CHECKSUM_PARTIAL)
12136 rc = XMIT_PLAIN;
12137
12138 else {
4781bfad 12139 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
12140 rc = XMIT_CSUM_V6;
12141 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12142 rc |= XMIT_CSUM_TCP;
12143
12144 } else {
12145 rc = XMIT_CSUM_V4;
12146 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12147 rc |= XMIT_CSUM_TCP;
12148 }
12149 }
12150
12151 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
d6a2f98b 12152 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
755735eb
EG
12153
12154 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
d6a2f98b 12155 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
755735eb
EG
12156
12157 return rc;
12158}
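/* Worked example of the classification above (editor's illustration):
 * a TSO IPv4/TCP skb with CHECKSUM_PARTIAL first gets
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP from the protocol checks, and the
 * SKB_GSO_TCPV4 test then ORs in XMIT_GSO_V4 (re-asserting the CSUM
 * bits), so xmit_type ends up as
 * XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP.  A non-GSO skb that is
 * not CHECKSUM_PARTIAL is simply XMIT_PLAIN.
 */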
12159
632da4d6 12160#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
12161/* check if the packet requires linearization (i.e. it is too fragmented);
12162 no need to check fragmentation if page size > 8K (there will be no
12163 violation of the FW restrictions) */
755735eb
EG
12164static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12165 u32 xmit_type)
12166{
12167 int to_copy = 0;
12168 int hlen = 0;
12169 int first_bd_sz = 0;
12170
12171 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12172 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12173
12174 if (xmit_type & XMIT_GSO) {
12175 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12176 /* Check if LSO packet needs to be copied:
12177 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12178 int wnd_size = MAX_FETCH_BD - 3;
33471629 12179 /* Number of windows to check */
755735eb
EG
12180 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12181 int wnd_idx = 0;
12182 int frag_idx = 0;
12183 u32 wnd_sum = 0;
12184
12185 /* Headers length */
12186 hlen = (int)(skb_transport_header(skb) - skb->data) +
12187 tcp_hdrlen(skb);
12188
12189 /* Amount of data (w/o headers) on linear part of SKB*/
12190 first_bd_sz = skb_headlen(skb) - hlen;
12191
12192 wnd_sum = first_bd_sz;
12193
12194 /* Calculate the first sum - it's special */
12195 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12196 wnd_sum +=
12197 skb_shinfo(skb)->frags[frag_idx].size;
12198
12199 /* If there was data in the linear part of the skb - check it */
12200 if (first_bd_sz > 0) {
12201 if (unlikely(wnd_sum < lso_mss)) {
12202 to_copy = 1;
12203 goto exit_lbl;
12204 }
12205
12206 wnd_sum -= first_bd_sz;
12207 }
12208
12209 /* Others are easier: run through the frag list and
12210 check all windows */
12211 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12212 wnd_sum +=
12213 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12214
12215 if (unlikely(wnd_sum < lso_mss)) {
12216 to_copy = 1;
12217 break;
12218 }
12219 wnd_sum -=
12220 skb_shinfo(skb)->frags[wnd_idx].size;
12221 }
755735eb
EG
12222 } else {
12223 /* in the non-LSO case a too fragmented packet
12224 should always be linearized */
12225 to_copy = 1;
12226 }
12227 }
12228
12229exit_lbl:
12230 if (unlikely(to_copy))
12231 DP(NETIF_MSG_TX_QUEUED,
12232 "Linearization IS REQUIRED for %s packet. "
12233 "num_frags %d hlen %d first_bd_sz %d\n",
12234 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12235 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12236
12237 return to_copy;
12238}
632da4d6 12239#endif
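/* Editor's standalone model of the window check above, assuming the FW
 * rule the driver encodes: within any MAX_FETCH_BD - 3 consecutive BDs
 * there must be at least one full MSS of payload, otherwise the skb
 * must be linearized.  frag_sz[0] can model the linear part; names and
 * types are illustrative, not driver API.
 */
#include <stdbool.h>

static bool demo_needs_linearize(const int *frag_sz, int nfrags,
				 int wnd_size, int lso_mss)
{
	int wnd_sum = 0;
	int i;

	if (nfrags < wnd_size)
		return false;	/* few enough BDs - FW is satisfied */

	for (i = 0; i < wnd_size; i++)
		wnd_sum += frag_sz[i];

	for (i = 0; ; i++) {
		if (wnd_sum < lso_mss)
			return true;	/* a window is short of one MSS */
		if (i + wnd_size >= nfrags)
			return false;	/* all windows checked */
		wnd_sum += frag_sz[i + wnd_size] - frag_sz[i];	/* slide */
	}
}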
755735eb
EG
12240
12241/* called with netif_tx_lock
a2fbb9ea 12242 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 12243 * netif_wake_queue()
a2fbb9ea 12244 */
61357325 12245static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
12246{
12247 struct bnx2x *bp = netdev_priv(dev);
54b9ddaa 12248 struct bnx2x_fastpath *fp;
555f6c78 12249 struct netdev_queue *txq;
a2fbb9ea 12250 struct sw_tx_bd *tx_buf;
ca00392c
EG
12251 struct eth_tx_start_bd *tx_start_bd;
12252 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
12253 struct eth_tx_parse_bd *pbd = NULL;
12254 u16 pkt_prod, bd_prod;
755735eb 12255 int nbd, fp_index;
a2fbb9ea 12256 dma_addr_t mapping;
755735eb 12257 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
12258 int i;
12259 u8 hlen = 0;
ca00392c 12260 __le16 pkt_size = 0;
dea7aab1
VZ
12261 struct ethhdr *eth;
12262 u8 mac_type = UNICAST_ADDRESS;
a2fbb9ea
ET
12263
12264#ifdef BNX2X_STOP_ON_ERROR
12265 if (unlikely(bp->panic))
12266 return NETDEV_TX_BUSY;
12267#endif
12268
555f6c78
EG
12269 fp_index = skb_get_queue_mapping(skb);
12270 txq = netdev_get_tx_queue(dev, fp_index);
12271
54b9ddaa 12272 fp = &bp->fp[fp_index];
755735eb 12273
231fd58a 12274 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
54b9ddaa 12275 fp->eth_q_stats.driver_xoff++;
555f6c78 12276 netif_tx_stop_queue(txq);
a2fbb9ea
ET
12277 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12278 return NETDEV_TX_BUSY;
12279 }
12280
755735eb
EG
12281 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12282 " gso type %x xmit_type %x\n",
12283 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12284 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12285
dea7aab1
VZ
12286 eth = (struct ethhdr *)skb->data;
12287
12288 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12289 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12290 if (is_broadcast_ether_addr(eth->h_dest))
12291 mac_type = BROADCAST_ADDRESS;
12292 else
12293 mac_type = MULTICAST_ADDRESS;
12294 }
12295
632da4d6 12296#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
12297 /* First, check if we need to linearize the skb (due to FW
12298 restrictions). No need to check fragmentation if page size > 8K
12299 (there will be no violation of the FW restrictions) */
755735eb
EG
12300 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12301 /* Statistics of linearization */
12302 bp->lin_cnt++;
12303 if (skb_linearize(skb) != 0) {
12304 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12305 "silently dropping this SKB\n");
12306 dev_kfree_skb_any(skb);
da5a662a 12307 return NETDEV_TX_OK;
755735eb
EG
12308 }
12309 }
632da4d6 12310#endif
755735eb 12311
a2fbb9ea 12312 /*
755735eb 12313 Please read carefully. First we use one BD which we mark as start,
ca00392c 12314 then we have a parsing info BD (used for TSO or xsum),
755735eb 12315 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
12316 (don't forget to mark the last one as last,
12317 and to unmap only AFTER you write to the BD ...)
755735eb 12318 And above all, all PBD sizes are in words - NOT DWORDS!
a2fbb9ea
ET
12319 */
12320
12321 pkt_prod = fp->tx_pkt_prod++;
755735eb 12322 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 12323
755735eb 12324 /* get a tx_buf and first BD */
a2fbb9ea 12325 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 12326 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 12327
ca00392c 12328 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
dea7aab1
VZ
12329 tx_start_bd->general_data = (mac_type <<
12330 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 12331 /* header nbd */
ca00392c 12332 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 12333
755735eb
EG
12334 /* remember the first BD of the packet */
12335 tx_buf->first_bd = fp->tx_bd_prod;
12336 tx_buf->skb = skb;
ca00392c 12337 tx_buf->flags = 0;
a2fbb9ea
ET
12338
12339 DP(NETIF_MSG_TX_QUEUED,
12340 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 12341 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 12342
0c6671b0
EG
12343#ifdef BCM_VLAN
12344 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12345 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
12346 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12347 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 12348 } else
0c6671b0 12349#endif
ca00392c 12350 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 12351
ca00392c
EG
12352 /* turn on parsing and get a BD */
12353 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12354 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 12355
ca00392c 12356 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
12357
12358 if (xmit_type & XMIT_CSUM) {
ca00392c 12359 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
12360
12361 /* for now NS flag is not used in Linux */
4781bfad
EG
12362 pbd->global_data =
12363 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12364 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 12365
755735eb
EG
12366 pbd->ip_hlen = (skb_transport_header(skb) -
12367 skb_network_header(skb)) / 2;
12368
12369 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 12370
755735eb 12371 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 12372 hlen = hlen*2;
a2fbb9ea 12373
ca00392c 12374 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
12375
12376 if (xmit_type & XMIT_CSUM_V4)
ca00392c 12377 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
12378 ETH_TX_BD_FLAGS_IP_CSUM;
12379 else
ca00392c
EG
12380 tx_start_bd->bd_flags.as_bitfield |=
12381 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
12382
12383 if (xmit_type & XMIT_CSUM_TCP) {
12384 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12385
12386 } else {
12387 s8 fix = SKB_CS_OFF(skb); /* signed! */
12388
ca00392c 12389 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 12390
755735eb 12391 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
12392 "hlen %d fix %d csum before fix %x\n",
12393 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
12394
12395 /* HW bug: fixup the CSUM */
12396 pbd->tcp_pseudo_csum =
12397 bnx2x_csum_fix(skb_transport_header(skb),
12398 SKB_CS(skb), fix);
12399
12400 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12401 pbd->tcp_pseudo_csum);
12402 }
a2fbb9ea
ET
12403 }
12404
1a983142
FT
12405 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12406 skb_headlen(skb), DMA_TO_DEVICE);
a2fbb9ea 12407
ca00392c
EG
12408 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12409 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12410 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12411 tx_start_bd->nbd = cpu_to_le16(nbd);
12412 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12413 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
12414
12415 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 12416 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
12417 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12418 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12419 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 12420
755735eb 12421 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
12422
12423 DP(NETIF_MSG_TX_QUEUED,
12424 "TSO packet len %d hlen %d total len %d tso size %d\n",
12425 skb->len, hlen, skb_headlen(skb),
12426 skb_shinfo(skb)->gso_size);
12427
ca00392c 12428 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 12429
755735eb 12430 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
12431 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12432 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
12433
12434 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12435 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
12436 pbd->tcp_flags = pbd_tcp_flags(skb);
12437
12438 if (xmit_type & XMIT_GSO_V4) {
12439 pbd->ip_id = swab16(ip_hdr(skb)->id);
12440 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
12441 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12442 ip_hdr(skb)->daddr,
12443 0, IPPROTO_TCP, 0));
755735eb
EG
12444
12445 } else
12446 pbd->tcp_pseudo_csum =
12447 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12448 &ipv6_hdr(skb)->daddr,
12449 0, IPPROTO_TCP, 0));
12450
a2fbb9ea
ET
12451 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12452 }
ca00392c 12453 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 12454
755735eb
EG
12455 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12456 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 12457
755735eb 12458 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
12459 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12460 if (total_pkt_bd == NULL)
12461 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 12462
1a983142
FT
12463 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12464 frag->page_offset,
12465 frag->size, DMA_TO_DEVICE);
a2fbb9ea 12466
ca00392c
EG
12467 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12468 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12469 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12470 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 12471
755735eb 12472 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
12473 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12474 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12475 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
12476 }
12477
ca00392c 12478 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 12479
a2fbb9ea
ET
12480 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12481
755735eb 12482 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
12483 * if the packet contains or ends with it
12484 */
12485 if (TX_BD_POFF(bd_prod) < nbd)
12486 nbd++;
12487
ca00392c
EG
12488 if (total_pkt_bd != NULL)
12489 total_pkt_bd->total_pkt_bytes = pkt_size;
12490
a2fbb9ea
ET
12491 if (pbd)
12492 DP(NETIF_MSG_TX_QUEUED,
12493 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12494 " tcp_flags %x xsum %x seq %u hlen %u\n",
12495 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12496 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 12497 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 12498
755735eb 12499 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 12500
58f4c4cf
EG
12501 /*
12502 * Make sure that the BD data is updated before updating the producer
12503 * since FW might read the BD right after the producer is updated.
12504 * This is only applicable for weak-ordered memory model archs such
12505 * as IA-64. The following barrier is also mandatory since the FW
12506 * assumes packets must have BDs.
12507 */
12508 wmb();
12509
ca00392c
EG
12510 fp->tx_db.data.prod += nbd;
12511 barrier();
54b9ddaa 12512 DOORBELL(bp, fp->index, fp->tx_db.raw);
a2fbb9ea
ET
12513
12514 mmiowb();
12515
755735eb 12516 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
12517
12518 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 12519 netif_tx_stop_queue(txq);
9baddeb8
SG
12520
12521 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12522 * ordering of set_bit() in netif_tx_stop_queue() and read of
12523 * fp->tx_bd_cons */
58f4c4cf 12524 smp_mb();
9baddeb8 12525
54b9ddaa 12526 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 12527 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 12528 netif_tx_wake_queue(txq);
a2fbb9ea 12529 }
54b9ddaa 12530 fp->tx_pkt++;
a2fbb9ea
ET
12531
12532 return NETDEV_TX_OK;
12533}
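/* Editor's summary of the producer-update protocol used above (no new
 * mechanism, just the sequence distilled):
 *   1. write the BDs;
 *   2. wmb()    - BD writes visible before the producer moves;
 *   3. bump tx_db.data.prod and barrier() - keep the compiler honest;
 *   4. DOORBELL() - tell the chip;
 *   5. mmiowb() - order the MMIO write before the tx lock is dropped.
 * The smp_mb() after netif_tx_stop_queue() pairs with the barrier in
 * bnx2x_tx_int() so a concurrent completion cannot miss a stopped queue.
 */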
12534
bb2a0f7a 12535/* called with rtnl_lock */
a2fbb9ea
ET
12536static int bnx2x_open(struct net_device *dev)
12537{
12538 struct bnx2x *bp = netdev_priv(dev);
12539
6eccabb3
EG
12540 netif_carrier_off(dev);
12541
a2fbb9ea
ET
12542 bnx2x_set_power_state(bp, PCI_D0);
12543
72fd0718
VZ
12544 if (!bnx2x_reset_is_done(bp)) {
12545 do {
12546 /* Reset the MCP mailbox sequence if there is an ongoing
12547 * recovery
12548 */
12549 bp->fw_seq = 0;
12550
12551 /* If it's the first function to load and reset done
12552 * is still not cleared, recovery may still be pending. We don't
12553 * check the attention state here because it may have
12554 * already been cleared by a "common" reset, but we
12555 * shall proceed with "process kill" anyway.
12556 */
12557 if ((bnx2x_get_load_cnt(bp) == 0) &&
12558 bnx2x_trylock_hw_lock(bp,
12559 HW_LOCK_RESOURCE_RESERVED_08) &&
12560 (!bnx2x_leader_reset(bp))) {
12561 DP(NETIF_MSG_HW, "Recovered in open\n");
12562 break;
12563 }
12564
12565 bnx2x_set_power_state(bp, PCI_D3hot);
12566
12567 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12568 " completed yet. Try again later. If u still see this"
12569 " message after a few retries then power cycle is"
12570 " required.\n", bp->dev->name);
12571
12572 return -EAGAIN;
12573 } while (0);
12574 }
12575
12576 bp->recovery_state = BNX2X_RECOVERY_DONE;
12577
bb2a0f7a 12578 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
12579}
12580
bb2a0f7a 12581/* called with rtnl_lock */
a2fbb9ea
ET
12582static int bnx2x_close(struct net_device *dev)
12583{
a2fbb9ea
ET
12584 struct bnx2x *bp = netdev_priv(dev);
12585
12586 /* Unload the driver, release IRQs */
bb2a0f7a 12587 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 12588 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
12589
12590 return 0;
12591}
12592
f5372251 12593/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
12594static void bnx2x_set_rx_mode(struct net_device *dev)
12595{
12596 struct bnx2x *bp = netdev_priv(dev);
12597 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12598 int port = BP_PORT(bp);
12599
12600 if (bp->state != BNX2X_STATE_OPEN) {
12601 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12602 return;
12603 }
12604
12605 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12606
12607 if (dev->flags & IFF_PROMISC)
12608 rx_mode = BNX2X_RX_MODE_PROMISC;
12609
12610 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
12611 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12612 CHIP_IS_E1(bp)))
34f80b04
EG
12613 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12614
12615 else { /* some multicasts */
12616 if (CHIP_IS_E1(bp)) {
12617 int i, old, offset;
22bedad3 12618 struct netdev_hw_addr *ha;
34f80b04
EG
12619 struct mac_configuration_cmd *config =
12620 bnx2x_sp(bp, mcast_config);
12621
0ddf477b 12622 i = 0;
22bedad3 12623 netdev_for_each_mc_addr(ha, dev) {
34f80b04
EG
12624 config->config_table[i].
12625 cam_entry.msb_mac_addr =
22bedad3 12626 swab16(*(u16 *)&ha->addr[0]);
34f80b04
EG
12627 config->config_table[i].
12628 cam_entry.middle_mac_addr =
22bedad3 12629 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
12630 config->config_table[i].
12631 cam_entry.lsb_mac_addr =
22bedad3 12632 swab16(*(u16 *)&ha->addr[4]);
34f80b04
EG
12633 config->config_table[i].cam_entry.flags =
12634 cpu_to_le16(port);
12635 config->config_table[i].
12636 target_table_entry.flags = 0;
ca00392c
EG
12637 config->config_table[i].target_table_entry.
12638 clients_bit_vector =
12639 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
12640 config->config_table[i].
12641 target_table_entry.vlan_id = 0;
12642
12643 DP(NETIF_MSG_IFUP,
12644 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12645 config->config_table[i].
12646 cam_entry.msb_mac_addr,
12647 config->config_table[i].
12648 cam_entry.middle_mac_addr,
12649 config->config_table[i].
12650 cam_entry.lsb_mac_addr);
0ddf477b 12651 i++;
34f80b04 12652 }
8d9c5f34 12653 old = config->hdr.length;
34f80b04
EG
12654 if (old > i) {
12655 for (; i < old; i++) {
12656 if (CAM_IS_INVALID(config->
12657 config_table[i])) {
af246401 12658 /* already invalidated */
34f80b04
EG
12659 break;
12660 }
12661 /* invalidate */
12662 CAM_INVALIDATE(config->
12663 config_table[i]);
12664 }
12665 }
12666
12667 if (CHIP_REV_IS_SLOW(bp))
12668 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12669 else
12670 offset = BNX2X_MAX_MULTICAST*(1 + port);
12671
8d9c5f34 12672 config->hdr.length = i;
34f80b04 12673 config->hdr.offset = offset;
8d9c5f34 12674 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
12675 config->hdr.reserved1 = 0;
12676
e665bfda
MC
12677 bp->set_mac_pending++;
12678 smp_wmb();
12679
34f80b04
EG
12680 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12681 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12682 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12683 0);
12684 } else { /* E1H */
12685 /* Accept one or more multicasts */
22bedad3 12686 struct netdev_hw_addr *ha;
34f80b04
EG
12687 u32 mc_filter[MC_HASH_SIZE];
12688 u32 crc, bit, regidx;
12689 int i;
12690
12691 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12692
22bedad3 12693 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 12694 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 12695 ha->addr);
34f80b04 12696
22bedad3 12697 crc = crc32c_le(0, ha->addr, ETH_ALEN);
34f80b04
EG
12698 bit = (crc >> 24) & 0xff;
12699 regidx = bit >> 5;
12700 bit &= 0x1f;
12701 mc_filter[regidx] |= (1 << bit);
12702 }
12703
12704 for (i = 0; i < MC_HASH_SIZE; i++)
12705 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12706 mc_filter[i]);
12707 }
12708 }
12709
12710 bp->rx_mode = rx_mode;
12711 bnx2x_set_storm_rx_mode(bp);
12712}
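/* Editor's standalone sketch of the E1H multicast filter placement
 * above, assuming a crc32c() with the same semantics as the kernel's
 * crc32c_le() (declared, not reimplemented): the top CRC byte picks one
 * of 256 filter bits, spread across MC_HASH_SIZE 32-bit registers.
 */
#include <stdint.h>

#define DEMO_MC_HASH_SIZE 8	/* 8 regs x 32 bits = 256 filter bits */

extern uint32_t crc32c(uint32_t seed, const uint8_t *p, int len);	/* assumed */

static void demo_mc_filter_set(uint32_t *mc_filter, const uint8_t *mac)
{
	uint32_t crc = crc32c(0, mac, 6);
	uint32_t bit = (crc >> 24) & 0xff;	/* 0..255 */
	uint32_t regidx = bit >> 5;		/* which of the 8 regs */

	mc_filter[regidx] |= 1u << (bit & 0x1f);
}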
12713
12714/* called with rtnl_lock */
a2fbb9ea
ET
12715static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12716{
12717 struct sockaddr *addr = p;
12718 struct bnx2x *bp = netdev_priv(dev);
12719
34f80b04 12720 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
12721 return -EINVAL;
12722
12723 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
12724 if (netif_running(dev)) {
12725 if (CHIP_IS_E1(bp))
e665bfda 12726 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 12727 else
e665bfda 12728 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 12729 }
a2fbb9ea
ET
12730
12731 return 0;
12732}
12733
c18487ee 12734/* called with rtnl_lock */
01cd4528
EG
12735static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12736 int devad, u16 addr)
a2fbb9ea 12737{
01cd4528
EG
12738 struct bnx2x *bp = netdev_priv(netdev);
12739 u16 value;
12740 int rc;
12741 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 12742
01cd4528
EG
12743 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12744 prtad, devad, addr);
a2fbb9ea 12745
01cd4528
EG
12746 if (prtad != bp->mdio.prtad) {
12747 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12748 prtad, bp->mdio.prtad);
12749 return -EINVAL;
12750 }
12751
12752 /* The HW expects different devad if CL22 is used */
12753 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 12754
01cd4528
EG
12755 bnx2x_acquire_phy_lock(bp);
12756 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12757 devad, addr, &value);
12758 bnx2x_release_phy_lock(bp);
12759 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 12760
01cd4528
EG
12761 if (!rc)
12762 rc = value;
12763 return rc;
12764}
a2fbb9ea 12765
01cd4528
EG
12766/* called with rtnl_lock */
12767static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12768 u16 addr, u16 value)
12769{
12770 struct bnx2x *bp = netdev_priv(netdev);
12771 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12772 int rc;
12773
12774 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12775 " value 0x%x\n", prtad, devad, addr, value);
12776
12777 if (prtad != bp->mdio.prtad) {
12778 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12779 prtad, bp->mdio.prtad);
12780 return -EINVAL;
a2fbb9ea
ET
12781 }
12782
01cd4528
EG
12783 /* The HW expects different devad if CL22 is used */
12784 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 12785
01cd4528
EG
12786 bnx2x_acquire_phy_lock(bp);
12787 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12788 devad, addr, value);
12789 bnx2x_release_phy_lock(bp);
12790 return rc;
12791}
c18487ee 12792
01cd4528
EG
12793/* called with rtnl_lock */
12794static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12795{
12796 struct bnx2x *bp = netdev_priv(dev);
12797 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 12798
01cd4528
EG
12799 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12800 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 12801
01cd4528
EG
12802 if (!netif_running(dev))
12803 return -EAGAIN;
12804
12805 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
12806}
12807
34f80b04 12808/* called with rtnl_lock */
a2fbb9ea
ET
12809static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12810{
12811 struct bnx2x *bp = netdev_priv(dev);
34f80b04 12812 int rc = 0;
a2fbb9ea 12813
72fd0718
VZ
12814 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12815 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12816 return -EAGAIN;
12817 }
12818
a2fbb9ea
ET
12819 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12820 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12821 return -EINVAL;
12822
12823 /* This does not race with packet allocation
c14423fe 12824 * because the actual alloc size is
a2fbb9ea
ET
12825 * only updated as part of load
12826 */
12827 dev->mtu = new_mtu;
12828
12829 if (netif_running(dev)) {
34f80b04
EG
12830 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12831 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 12832 }
34f80b04
EG
12833
12834 return rc;
a2fbb9ea
ET
12835}
12836
12837static void bnx2x_tx_timeout(struct net_device *dev)
12838{
12839 struct bnx2x *bp = netdev_priv(dev);
12840
12841#ifdef BNX2X_STOP_ON_ERROR
12842 if (!bp->panic)
12843 bnx2x_panic();
12844#endif
12845 /* This allows the netif to be shut down gracefully before resetting */
72fd0718 12846 schedule_delayed_work(&bp->reset_task, 0);
a2fbb9ea
ET
12847}
12848
12849#ifdef BCM_VLAN
34f80b04 12850/* called with rtnl_lock */
a2fbb9ea
ET
12851static void bnx2x_vlan_rx_register(struct net_device *dev,
12852 struct vlan_group *vlgrp)
12853{
12854 struct bnx2x *bp = netdev_priv(dev);
12855
12856 bp->vlgrp = vlgrp;
0c6671b0
EG
12857
12858 /* Set flags according to the required capabilities */
12859 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12860
12861 if (dev->features & NETIF_F_HW_VLAN_TX)
12862 bp->flags |= HW_VLAN_TX_FLAG;
12863
12864 if (dev->features & NETIF_F_HW_VLAN_RX)
12865 bp->flags |= HW_VLAN_RX_FLAG;
12866
a2fbb9ea 12867 if (netif_running(dev))
49d66772 12868 bnx2x_set_client_config(bp);
a2fbb9ea 12869}
34f80b04 12870
a2fbb9ea
ET
12871#endif
12872
257ddbda 12873#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
12874static void poll_bnx2x(struct net_device *dev)
12875{
12876 struct bnx2x *bp = netdev_priv(dev);
12877
12878 disable_irq(bp->pdev->irq);
12879 bnx2x_interrupt(bp->pdev->irq, dev);
12880 enable_irq(bp->pdev->irq);
12881}
12882#endif
12883
c64213cd
SH
12884static const struct net_device_ops bnx2x_netdev_ops = {
12885 .ndo_open = bnx2x_open,
12886 .ndo_stop = bnx2x_close,
12887 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 12888 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
12889 .ndo_set_mac_address = bnx2x_change_mac_addr,
12890 .ndo_validate_addr = eth_validate_addr,
12891 .ndo_do_ioctl = bnx2x_ioctl,
12892 .ndo_change_mtu = bnx2x_change_mtu,
12893 .ndo_tx_timeout = bnx2x_tx_timeout,
12894#ifdef BCM_VLAN
12895 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12896#endif
257ddbda 12897#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
12898 .ndo_poll_controller = poll_bnx2x,
12899#endif
12900};
12901
34f80b04
EG
12902static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12903 struct net_device *dev)
a2fbb9ea
ET
12904{
12905 struct bnx2x *bp;
12906 int rc;
12907
12908 SET_NETDEV_DEV(dev, &pdev->dev);
12909 bp = netdev_priv(dev);
12910
34f80b04
EG
12911 bp->dev = dev;
12912 bp->pdev = pdev;
a2fbb9ea 12913 bp->flags = 0;
34f80b04 12914 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
12915
12916 rc = pci_enable_device(pdev);
12917 if (rc) {
cdaa7cb8
VZ
12918 dev_err(&bp->pdev->dev,
12919 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
12920 goto err_out;
12921 }
12922
12923 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
12924 dev_err(&bp->pdev->dev,
12925 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
12926 rc = -ENODEV;
12927 goto err_out_disable;
12928 }
12929
12930 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
12931 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12932 " base address, aborting\n");
a2fbb9ea
ET
12933 rc = -ENODEV;
12934 goto err_out_disable;
12935 }
12936
34f80b04
EG
12937 if (atomic_read(&pdev->enable_cnt) == 1) {
12938 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12939 if (rc) {
cdaa7cb8
VZ
12940 dev_err(&bp->pdev->dev,
12941 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
12942 goto err_out_disable;
12943 }
a2fbb9ea 12944
34f80b04
EG
12945 pci_set_master(pdev);
12946 pci_save_state(pdev);
12947 }
a2fbb9ea
ET
12948
12949 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12950 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
12951 dev_err(&bp->pdev->dev,
12952 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
12953 rc = -EIO;
12954 goto err_out_release;
12955 }
12956
12957 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12958 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
12959 dev_err(&bp->pdev->dev,
12960 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
12961 rc = -EIO;
12962 goto err_out_release;
12963 }
12964
1a983142 12965 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 12966 bp->flags |= USING_DAC_FLAG;
1a983142 12967 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
12968 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12969 " failed, aborting\n");
a2fbb9ea
ET
12970 rc = -EIO;
12971 goto err_out_release;
12972 }
12973
1a983142 12974 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
12975 dev_err(&bp->pdev->dev,
12976 "System does not support DMA, aborting\n");
a2fbb9ea
ET
12977 rc = -EIO;
12978 goto err_out_release;
12979 }
12980
34f80b04
EG
12981 dev->mem_start = pci_resource_start(pdev, 0);
12982 dev->base_addr = dev->mem_start;
12983 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
12984
12985 dev->irq = pdev->irq;
12986
275f165f 12987 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 12988 if (!bp->regview) {
cdaa7cb8
VZ
12989 dev_err(&bp->pdev->dev,
12990 "Cannot map register space, aborting\n");
a2fbb9ea
ET
12991 rc = -ENOMEM;
12992 goto err_out_release;
12993 }
12994
34f80b04
EG
12995 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12996 min_t(u64, BNX2X_DB_SIZE,
12997 pci_resource_len(pdev, 2)));
a2fbb9ea 12998 if (!bp->doorbells) {
cdaa7cb8
VZ
12999 dev_err(&bp->pdev->dev,
13000 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
13001 rc = -ENOMEM;
13002 goto err_out_unmap;
13003 }
13004
13005 bnx2x_set_power_state(bp, PCI_D0);
13006
34f80b04
EG
13007 /* clean indirect addresses */
13008 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13009 PCICFG_VENDOR_ID_OFFSET);
13010 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
13011 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
13012 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
13013 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 13014
72fd0718
VZ
13015 /* Reset the load counter */
13016 bnx2x_clear_load_cnt(bp);
13017
34f80b04 13018 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 13019
c64213cd 13020 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 13021 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
13022 dev->features |= NETIF_F_SG;
13023 dev->features |= NETIF_F_HW_CSUM;
13024 if (bp->flags & USING_DAC_FLAG)
13025 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
13026 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13027 dev->features |= NETIF_F_TSO6;
34f80b04
EG
13028#ifdef BCM_VLAN
13029 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 13030 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
13031
13032 dev->vlan_features |= NETIF_F_SG;
13033 dev->vlan_features |= NETIF_F_HW_CSUM;
13034 if (bp->flags & USING_DAC_FLAG)
13035 dev->vlan_features |= NETIF_F_HIGHDMA;
13036 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13037 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 13038#endif
a2fbb9ea 13039
01cd4528
EG
13040 /* get_port_hwinfo() will set prtad and mmds properly */
13041 bp->mdio.prtad = MDIO_PRTAD_NONE;
13042 bp->mdio.mmds = 0;
13043 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13044 bp->mdio.dev = dev;
13045 bp->mdio.mdio_read = bnx2x_mdio_read;
13046 bp->mdio.mdio_write = bnx2x_mdio_write;
13047
a2fbb9ea
ET
13048 return 0;
13049
13050err_out_unmap:
13051 if (bp->regview) {
13052 iounmap(bp->regview);
13053 bp->regview = NULL;
13054 }
a2fbb9ea
ET
13055 if (bp->doorbells) {
13056 iounmap(bp->doorbells);
13057 bp->doorbells = NULL;
13058 }
13059
13060err_out_release:
34f80b04
EG
13061 if (atomic_read(&pdev->enable_cnt) == 1)
13062 pci_release_regions(pdev);
a2fbb9ea
ET
13063
13064err_out_disable:
13065 pci_disable_device(pdev);
13066 pci_set_drvdata(pdev, NULL);
13067
13068err_out:
13069 return rc;
13070}
13071
37f9ce62
EG
13072static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13073 int *width, int *speed)
25047950
ET
13074{
13075 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13076
37f9ce62 13077 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 13078
37f9ce62
EG
13079 /* return value of 1=2.5GHz 2=5GHz */
13080 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 13081}
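/* Worked example (editor's illustration) of the decode above: with a
 * link-control value whose width field reads 8 and speed field reads 2,
 * the probe banner later prints "PCI-E x8 5GHz (Gen2)"; a speed field
 * of 1 would print "2.5GHz" (Gen1), per the 1=2.5GHz / 2=5GHz encoding
 * noted in the comment.
 */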
37f9ce62 13082
94a78b79
VZ
13083static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13084{
37f9ce62 13085 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
13086 struct bnx2x_fw_file_hdr *fw_hdr;
13087 struct bnx2x_fw_file_section *sections;
94a78b79 13088 u32 offset, len, num_ops;
37f9ce62 13089 u16 *ops_offsets;
94a78b79 13090 int i;
37f9ce62 13091 const u8 *fw_ver;
94a78b79
VZ
13092
13093 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13094 return -EINVAL;
13095
13096 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13097 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13098
13099 /* Make sure none of the offsets and sizes make us read beyond
13100 * the end of the firmware data */
13101 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13102 offset = be32_to_cpu(sections[i].offset);
13103 len = be32_to_cpu(sections[i].len);
13104 if (offset + len > firmware->size) {
cdaa7cb8
VZ
13105 dev_err(&bp->pdev->dev,
13106 "Section %d length is out of bounds\n", i);
94a78b79
VZ
13107 return -EINVAL;
13108 }
13109 }
13110
13111 /* Likewise for the init_ops offsets */
13112 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13113 ops_offsets = (u16 *)(firmware->data + offset);
13114 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13115
13116 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13117 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
13118 dev_err(&bp->pdev->dev,
13119 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
13120 return -EINVAL;
13121 }
13122 }
13123
13124 /* Check FW version */
13125 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13126 fw_ver = firmware->data + offset;
13127 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13128 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13129 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13130 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
13131 dev_err(&bp->pdev->dev,
13132 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
13133 fw_ver[0], fw_ver[1], fw_ver[2],
13134 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13135 BCM_5710_FW_MINOR_VERSION,
13136 BCM_5710_FW_REVISION_VERSION,
13137 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 13138 return -EINVAL;
94a78b79
VZ
13139 }
13140
13141 return 0;
13142}
13143
ab6ad5a4 13144static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 13145{
ab6ad5a4
EG
13146 const __be32 *source = (const __be32 *)_source;
13147 u32 *target = (u32 *)_target;
94a78b79 13148 u32 i;
94a78b79
VZ
13149
13150 for (i = 0; i < n/4; i++)
13151 target[i] = be32_to_cpu(source[i]);
13152}
13153
13154/*
13155 Ops array is stored in the following format:
13156 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13157 */
ab6ad5a4 13158static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 13159{
ab6ad5a4
EG
13160 const __be32 *source = (const __be32 *)_source;
13161 struct raw_op *target = (struct raw_op *)_target;
94a78b79 13162 u32 i, j, tmp;
94a78b79 13163
ab6ad5a4 13164 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
13165 tmp = be32_to_cpu(source[j]);
13166 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
13167 target[i].offset = tmp & 0xffffff;
13168 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
13169 }
13170}
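/* Worked example of the unpacking above (editor's illustration): a pair
 * of file words that read 0x02000010 and 0x00000005 after be32_to_cpu()
 * becomes
 *   op       = 0x02000010 >> 24      = 0x02,
 *   offset   = 0x02000010 & 0xffffff = 0x000010,
 *   raw_data = 0x00000005,
 * i.e. "op 2 at offset 0x10 with data 5", matching the
 * {op(8bit), offset(24bit), data(32bit)} format documented above.
 */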
ab6ad5a4
EG
13171
13172static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 13173{
ab6ad5a4
EG
13174 const __be16 *source = (const __be16 *)_source;
13175 u16 *target = (u16 *)_target;
94a78b79 13176 u32 i;
94a78b79
VZ
13177
13178 for (i = 0; i < n/2; i++)
13179 target[i] = be16_to_cpu(source[i]);
13180}
13181
7995c64e
JP
13182#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13183do { \
13184 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13185 bp->arr = kmalloc(len, GFP_KERNEL); \
13186 if (!bp->arr) { \
13187 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13188 goto lbl; \
13189 } \
13190 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13191 (u8 *)bp->arr, len); \
13192} while (0)
94a78b79 13193
94a78b79
VZ
13194static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13195{
45229b42 13196 const char *fw_file_name;
94a78b79 13197 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 13198 int rc;
94a78b79 13199
94a78b79 13200 if (CHIP_IS_E1(bp))
45229b42 13201 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 13202 else if (CHIP_IS_E1H(bp))
45229b42 13203 fw_file_name = FW_FILE_NAME_E1H;
cdaa7cb8
VZ
13204 else {
13205 dev_err(dev, "Unsupported chip revision\n");
13206 return -EINVAL;
13207 }
94a78b79 13208
cdaa7cb8 13209 dev_info(dev, "Loading %s\n", fw_file_name);
94a78b79
VZ
13210
13211 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13212 if (rc) {
cdaa7cb8 13213 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
13214 goto request_firmware_exit;
13215 }
13216
13217 rc = bnx2x_check_firmware(bp);
13218 if (rc) {
cdaa7cb8 13219 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
13220 goto request_firmware_exit;
13221 }
13222
13223 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13224
13225 /* Initialize the pointers to the init arrays */
13226 /* Blob */
13227 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13228
13229 /* Opcodes */
13230 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13231
13232 /* Offsets */
ab6ad5a4
EG
13233 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13234 be16_to_cpu_n);
94a78b79
VZ
13235
13236 /* STORMs firmware */
573f2035
EG
13237 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13238 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13239 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13240 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13241 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13242 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13243 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13244 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13245 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13246 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13247 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13248 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13249 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13250 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13251 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13252 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
13253
13254 return 0;
ab6ad5a4 13255
94a78b79
VZ
13256init_offsets_alloc_err:
13257 kfree(bp->init_ops);
13258init_ops_alloc_err:
13259 kfree(bp->init_data);
13260request_firmware_exit:
13261 release_firmware(bp->firmware);
13262
13263 return rc;
13264}
13265
13266
a2fbb9ea
ET
13267static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13268 const struct pci_device_id *ent)
13269{
a2fbb9ea
ET
13270 struct net_device *dev = NULL;
13271 struct bnx2x *bp;
37f9ce62 13272 int pcie_width, pcie_speed;
25047950 13273 int rc;
a2fbb9ea 13274
a2fbb9ea 13275 /* dev zeroed in init_etherdev */
555f6c78 13276 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04 13277 if (!dev) {
cdaa7cb8 13278 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 13279 return -ENOMEM;
34f80b04 13280 }
a2fbb9ea 13281
a2fbb9ea 13282 bp = netdev_priv(dev);
7995c64e 13283 bp->msg_enable = debug;
a2fbb9ea 13284
df4770de
EG
13285 pci_set_drvdata(pdev, dev);
13286
34f80b04 13287 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
13288 if (rc < 0) {
13289 free_netdev(dev);
13290 return rc;
13291 }
13292
34f80b04 13293 rc = bnx2x_init_bp(bp);
693fc0d1
EG
13294 if (rc)
13295 goto init_one_exit;
13296
94a78b79
VZ
13297 /* Set init arrays */
13298 rc = bnx2x_init_firmware(bp, &pdev->dev);
13299 if (rc) {
cdaa7cb8 13300 dev_err(&pdev->dev, "Error loading firmware\n");
94a78b79
VZ
13301 goto init_one_exit;
13302 }
13303
693fc0d1 13304 rc = register_netdev(dev);
34f80b04 13305 if (rc) {
693fc0d1 13306 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
13307 goto init_one_exit;
13308 }
13309
37f9ce62 13310 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
cdaa7cb8
VZ
13311 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13312 " IRQ %d, ", board_info[ent->driver_data].name,
13313 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13314 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13315 dev->base_addr, bp->pdev->irq);
13316 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 13317
a2fbb9ea 13318 return 0;
34f80b04
EG
13319
13320init_one_exit:
13321 if (bp->regview)
13322 iounmap(bp->regview);
13323
13324 if (bp->doorbells)
13325 iounmap(bp->doorbells);
13326
13327 free_netdev(dev);
13328
13329 if (atomic_read(&pdev->enable_cnt) == 1)
13330 pci_release_regions(pdev);
13331
13332 pci_disable_device(pdev);
13333 pci_set_drvdata(pdev, NULL);
13334
13335 return rc;
a2fbb9ea
ET
13336}
13337
13338static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13339{
13340 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
13341 struct bnx2x *bp;
13342
13343 if (!dev) {
cdaa7cb8 13344 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
13345 return;
13346 }
228241eb 13347 bp = netdev_priv(dev);
a2fbb9ea 13348
a2fbb9ea
ET
13349 unregister_netdev(dev);
13350
72fd0718
VZ
13351 /* Make sure RESET task is not scheduled before continuing */
13352 cancel_delayed_work_sync(&bp->reset_task);
13353
94a78b79
VZ
13354 kfree(bp->init_ops_offsets);
13355 kfree(bp->init_ops);
13356 kfree(bp->init_data);
13357 release_firmware(bp->firmware);
13358
a2fbb9ea
ET
13359 if (bp->regview)
13360 iounmap(bp->regview);
13361
13362 if (bp->doorbells)
13363 iounmap(bp->doorbells);
13364
13365 free_netdev(dev);
34f80b04
EG
13366
13367 if (atomic_read(&pdev->enable_cnt) == 1)
13368 pci_release_regions(pdev);
13369
a2fbb9ea
ET
13370 pci_disable_device(pdev);
13371 pci_set_drvdata(pdev, NULL);
13372}
13373
13374static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13375{
13376 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
13377 struct bnx2x *bp;
13378
34f80b04 13379 if (!dev) {
cdaa7cb8 13380 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
34f80b04
EG
13381 return -ENODEV;
13382 }
13383 bp = netdev_priv(dev);
a2fbb9ea 13384
34f80b04 13385 rtnl_lock();
a2fbb9ea 13386
34f80b04 13387 pci_save_state(pdev);
228241eb 13388
34f80b04
EG
13389 if (!netif_running(dev)) {
13390 rtnl_unlock();
13391 return 0;
13392 }
a2fbb9ea
ET
13393
13394 netif_device_detach(dev);
a2fbb9ea 13395
da5a662a 13396 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 13397
a2fbb9ea 13398 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 13399
34f80b04
EG
13400 rtnl_unlock();
13401
a2fbb9ea
ET
13402 return 0;
13403}
13404
13405static int bnx2x_resume(struct pci_dev *pdev)
13406{
13407 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 13408 struct bnx2x *bp;
a2fbb9ea
ET
13409 int rc;
13410
228241eb 13411 if (!dev) {
cdaa7cb8 13412 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
13413 return -ENODEV;
13414 }
228241eb 13415 bp = netdev_priv(dev);
a2fbb9ea 13416
72fd0718
VZ
13417 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13418 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13419 return -EAGAIN;
13420 }
13421
34f80b04
EG
13422 rtnl_lock();
13423
228241eb 13424 pci_restore_state(pdev);
34f80b04
EG
13425
13426 if (!netif_running(dev)) {
13427 rtnl_unlock();
13428 return 0;
13429 }
13430
a2fbb9ea
ET
13431 bnx2x_set_power_state(bp, PCI_D0);
13432 netif_device_attach(dev);
13433
da5a662a 13434 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 13435
34f80b04
EG
13436 rtnl_unlock();
13437
13438 return rc;
a2fbb9ea
ET
13439}
13440
f8ef6e44
YG
13441static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13442{
13443 int i;
13444
13445 bp->state = BNX2X_STATE_ERROR;
13446
13447 bp->rx_mode = BNX2X_RX_MODE_NONE;
13448
13449 bnx2x_netif_stop(bp, 0);
c89af1a3 13450 netif_carrier_off(bp->dev);
f8ef6e44
YG
13451
13452 del_timer_sync(&bp->timer);
13453 bp->stats_state = STATS_STATE_DISABLED;
13454 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13455
13456 /* Release IRQs */
6cbe5065 13457 bnx2x_free_irq(bp, false);
f8ef6e44
YG
13458
13459 if (CHIP_IS_E1(bp)) {
13460 struct mac_configuration_cmd *config =
13461 bnx2x_sp(bp, mcast_config);
13462
8d9c5f34 13463 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
13464 CAM_INVALIDATE(config->config_table[i]);
13465 }
13466
13467 /* Free SKBs, SGEs, TPA pool and driver internals */
13468 bnx2x_free_skbs(bp);
54b9ddaa 13469 for_each_queue(bp, i)
f8ef6e44 13470 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 13471 for_each_queue(bp, i)
7cde1c8b 13472 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
13473 bnx2x_free_mem(bp);
13474
13475 bp->state = BNX2X_STATE_CLOSED;
13476
f8ef6e44
YG
13477 return 0;
13478}
13479
13480static void bnx2x_eeh_recover(struct bnx2x *bp)
13481{
13482 u32 val;
13483
13484 mutex_init(&bp->port.phy_mutex);
13485
13486 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13487 bp->link_params.shmem_base = bp->common.shmem_base;
13488 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13489
13490 if (!bp->common.shmem_base ||
13491 (bp->common.shmem_base < 0xA0000) ||
13492 (bp->common.shmem_base >= 0xC0000)) {
13493 BNX2X_DEV_INFO("MCP not active\n");
13494 bp->flags |= NO_MCP_FLAG;
13495 return;
13496 }
13497
13498 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13499 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13500 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13501 BNX2X_ERR("BAD MCP validity signature\n");
13502
13503 if (!BP_NOMCP(bp)) {
13504 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13505 & DRV_MSG_SEQ_NUMBER_MASK);
13506 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13507 }
13508}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
13574
13575/**
13576 * bnx2x_io_resume - called when traffic can start flowing again
13577 * @pdev: Pointer to PCI device
13578 *
13579 * This callback is called when the error recovery driver tells us that
13580 * its OK to resume normal operation.
13581 */
13582static void bnx2x_io_resume(struct pci_dev *pdev)
13583{
13584 struct net_device *dev = pci_get_drvdata(pdev);
13585 struct bnx2x *bp = netdev_priv(dev);
13586
72fd0718
VZ
13587 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13588 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13589 return;
13590 }
13591
493adb1f
WX
13592 rtnl_lock();
13593
f8ef6e44
YG
13594 bnx2x_eeh_recover(bp);
13595
493adb1f 13596 if (netif_running(dev))
f8ef6e44 13597 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
13598
13599 netif_device_attach(dev);
13600
13601 rtnl_unlock();
13602}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
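
/* The AER/EEH core drives these hooks in sequence: error_detected(),
 * then slot_reset() once the link has been reset, then resume().  The
 * handler above only distinguishes permanent failure; a sketch of the
 * full pci_channel_state_t taxonomy, with a hypothetical driver name,
 * purely for illustration:
 */
#if 0
static pci_ers_result_t foo_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_normal:		/* non-fatal, I/O still works */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:		/* MMIO blocked, needs reset */
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:	/* device is gone for good */
	default:
		return PCI_ERS_RESULT_DISCONNECT;
	}
}
#endif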

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
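
/* The cons/prod walk above is the usual fixed-size ring idiom: compare
 * against the last valid slot and wrap back to the base.  Reduced to its
 * essentials (generic names, illustration only):
 */
#if 0
struct spe_ring {
	struct eth_spe *base, *last, *cons;	/* last == base + size - 1 */
};

static struct eth_spe *spe_ring_consume(struct spe_ring *r)
{
	struct eth_spe *e = r->cons;

	/* advance the consumer, wrapping past the final slot */
	r->cons = (r->cons == r->last) ? r->base : r->cons + 1;
	return e;
}
#endif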

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
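
/* Note the two flavors above: bnx2x_cnic_ctl_send() serializes against
 * (un)registration with cnic_mutex and may sleep, while the _bh variant
 * uses an RCU read-side section so it is safe from softirq context.
 * The generic read-side pattern, sketched with illustrative names:
 */
#if 0
static void rcu_ops_call(void *data)
{
	struct cnic_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(shared_ops);	/* NULL once unregistered */
	if (ops)
		ops->cnic_ctl(data, NULL);
	rcu_read_unlock();
}
#endif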

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
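
/* These commands arrive from the cnic module through the drv_ctl hook
 * published in bnx2x_cnic_probe() below.  A hedged sketch of the caller
 * side (variable names are assumptions about cnic, illustration only):
 */
#if 0
struct drv_ctl_info info = {
	.cmd = DRV_CTL_COMPLETION_CMD,
	.data.comp.comp_count = completions_seen,
};

ethdev->drv_ctl(netdev, &info);	/* returns SPQ credit back to bnx2x */
#endif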

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
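
/* With MSI-X, cnic is handed its own vector (msix_table[1]) and status
 * block; otherwise it must share the single interrupt and poll the
 * default status block.  Roughly how a consumer would act on the
 * advertised info (handler name hypothetical, illustration only):
 */
#if 0
if (cp->irq_arr[0].irq_flags & CNIC_IRQ_FL_MSIX)
	err = request_irq(cp->irq_arr[0].vector, cnic_msix_handler,
			  0, "cnic", dev);
#endif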

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
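
/* Unregistration above follows the classic RCU unpublish sequence:
 * clear the pointer under the mutex, wait out existing readers, then
 * free.  In outline (illustrative names):
 */
#if 0
mutex_lock(&lock);
rcu_assign_pointer(shared_ops, NULL);	/* no new readers see ops */
mutex_unlock(&lock);
synchronize_rcu();			/* in-flight read-side sections drain */
kfree(resource);			/* now nothing can still touch it */
#endif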

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
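
/* The cnic module binds to bnx2x devices through this exported probe
 * and then registers its ops via the drv_register_cnic hook filled in
 * above.  A hedged sketch of that consumer side (ops/data names are
 * assumptions, illustration only):
 */
#if 0
struct cnic_eth_dev *ethdev = bnx2x_cnic_probe(netdev);

if (ethdev && !ethdev->drv_register_cnic(netdev, &my_cnic_ops, my_data))
	pr_info("cnic attached, first cid %d\n", ethdev->starting_cid);
#endif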

#endif /* BCM_CNIC */