/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.53-1"
#define DRV_MODULE_RELDATE	"2010/18/04"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

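/* Indirect read through the same PCI config window: point PCICFG_GRC_ADDRESS
 * at the target register, read PCICFG_GRC_DATA, then park the window back at
 * PCICFG_VENDOR_ID_OFFSET so stray config accesses hit a harmless location.
 */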
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

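/* DMA a buffer from host memory into device GRC space.  The completion word
 * in the slowpath area is polled (up to ~200 iterations) until the engine
 * writes DMAE_COMP_VAL; while DMAE is not yet initialized the copy falls
 * back to indirect register writes.
 */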
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

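/* A single DMAE command can move at most DMAE_LEN32_WR_MAX dwords, so longer
 * writes are split into maximal-size chunks followed by the remainder.
 */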
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

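/* Scan the assert lists of the four storm processors (X/T/C/U) and print any
 * firmware asserts that were recorded; returns the number of asserts found.
 */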
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x)  *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

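/* Acknowledge a status block towards the IGU: report the last-seen index for
 * the given storm and set the interrupt mode (op enables or disables further
 * interrupts for this status block).
 */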
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

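/* Number of Tx BDs still available to the driver.  The NUM_TX_RINGS
 * "next page" descriptors never carry data, so they are folded into the
 * used count; e.g. an idle ring reports tx_ring_size minus those entries.
 */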
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

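/* Reclaim descriptors of completed Tx packets and, if the queue was stopped
 * on a full ring, wake it once enough BDs are free again.
 */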
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

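/* Handle a slowpath event (ramrod completion) delivered on the Rx CQ:
 * advance the per-queue or global state machine accordingly.
 */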
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

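/* Walk the SGEs consumed by this CQE, clear their mask bits, and push the SGE
 * producer forward over every fully-consumed 64-entry mask element.
 */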
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

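/* TPA start: the firmware begins aggregating a stream into the given queue
 * (bin).  The current Rx buffer is parked in the tpa_pool and replaced on
 * the ring by the bin's spare skb.
 */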
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

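/* Attach the SGE pages of an aggregated packet to the linear skb as page
 * fragments, replacing each page on the ring as it is consumed.
 */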
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

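/* TPA stop: aggregation for this bin has ended.  Fix up the IP checksum,
 * attach the SGE fragments and hand the packet to the stack; on allocation
 * failure the packet is dropped but the bin keeps its buffer.
 */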
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

1511}
1512
1513static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1516 u16 rx_sge_prod)
1517{
8d9c5f34 1518 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1519 int i;
1520
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1525
58f4c4cf
EG
1526 /*
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1529 * is updated.
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW will
1532 * assumes BDs must have buffers.
1533 */
1534 wmb();
1535
8d9c5f34
EG
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1539 ((u32 *)&rx_prods)[i]);
1540
58f4c4cf
EG
1541 mmiowb(); /* keep prod updates ordered */
1542
7a9b2557 1543 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1546}
1547
a2fbb9ea
ET
1548static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549{
1550 struct bnx2x *bp = fp->bp;
34f80b04 1551 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1552 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553 int rx_pkt = 0;
1554
1555#ifdef BNX2X_STOP_ON_ERROR
1556 if (unlikely(bp->panic))
1557 return 0;
1558#endif
1559
34f80b04
EG
1560 /* CQ "next element" is of the size of the regular element,
1561 that's why it's ok here */
a2fbb9ea
ET
1562 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564 hw_comp_cons++;
1565
1566 bd_cons = fp->rx_bd_cons;
1567 bd_prod = fp->rx_bd_prod;
34f80b04 1568 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1569 sw_comp_cons = fp->rx_comp_cons;
1570 sw_comp_prod = fp->rx_comp_prod;
1571
1572 /* Memory barrier necessary as speculative reads of the rx
1573 * buffer can be ahead of the index in the status block
1574 */
1575 rmb();
1576
1577 DP(NETIF_MSG_RX_STATUS,
1578 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1579 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1580
1581 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1582 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1583 struct sk_buff *skb;
1584 union eth_rx_cqe *cqe;
c68ed255 1585 u8 cqe_fp_flags, cqe_fp_status_flags;
34f80b04 1586 u16 len, pad;
a2fbb9ea
ET
1587
1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589 bd_prod = RX_BD(bd_prod);
1590 bd_cons = RX_BD(bd_cons);
1591
619e7a66
EG
1592 /* Prefetch the page containing the BD descriptor
1593 at producer's index. It will be needed when new skb is
1594 allocated */
1595 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596 (&fp->rx_desc_ring[bd_prod])) -
1597 PAGE_SIZE + 1));
1598
a2fbb9ea 1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
c68ed255 1601 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
a2fbb9ea 1602
a2fbb9ea 1603 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1604 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1605 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1606 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1607 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1608 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1609
1610 /* is this a slowpath msg? */
34f80b04 1611 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1612 bnx2x_sp_event(fp, cqe);
1613 goto next_cqe;
1614
1615 /* this is an rx packet */
1616 } else {
1617 rx_buf = &fp->rx_buf_ring[bd_cons];
1618 skb = rx_buf->skb;
54b9ddaa 1619 prefetch(skb);
a2fbb9ea
ET
1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621 pad = cqe->fast_path_cqe.placement_offset;
1622
7a9b2557
VZ
1623 /* If CQE is marked both TPA_START and TPA_END
1624 it is a non-TPA CQE */
1625 if ((!fp->disable_tpa) &&
1626 (TPA_TYPE(cqe_fp_flags) !=
1627 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1628 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1629
1630 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631 DP(NETIF_MSG_RX_STATUS,
1632 "calling tpa_start on queue %d\n",
1633 queue);
1634
1635 bnx2x_tpa_start(fp, queue, skb,
1636 bd_cons, bd_prod);
1637 goto next_rx;
1638 }
1639
1640 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641 DP(NETIF_MSG_RX_STATUS,
1642 "calling tpa_stop on queue %d\n",
1643 queue);
1644
1645 if (!BNX2X_RX_SUM_FIX(cqe))
1646 BNX2X_ERR("STOP on none TCP "
1647 "data\n");
1648
1649 /* This is a size of the linear data
1650 on this skb */
1651 len = le16_to_cpu(cqe->fast_path_cqe.
1652 len_on_bd);
1653 bnx2x_tpa_stop(bp, fp, queue, pad,
1654 len, cqe, comp_ring_cons);
1655#ifdef BNX2X_STOP_ON_ERROR
1656 if (bp->panic)
17cb4006 1657 return 0;
7a9b2557
VZ
1658#endif
1659
1660 bnx2x_update_sge_prod(fp,
1661 &cqe->fast_path_cqe);
1662 goto next_cqe;
1663 }
1664 }
1665
1a983142
FT
1666 dma_sync_single_for_device(&bp->pdev->dev,
1667 dma_unmap_addr(rx_buf, mapping),
1668 pad + RX_COPY_THRESH,
1669 DMA_FROM_DEVICE);
a2fbb9ea
ET
1670 prefetch(((char *)(skb)) + 128);
1671
1672 /* is this an error packet? */
34f80b04 1673 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1674 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1675 "ERROR flags %x rx packet %u\n",
1676 cqe_fp_flags, sw_comp_cons);
de832a55 1677 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1678 goto reuse_rx;
1679 }
1680
1681 /* Since we don't have a jumbo ring
1682 * copy small packets if mtu > 1500
1683 */
1684 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1685 (len <= RX_COPY_THRESH)) {
1686 struct sk_buff *new_skb;
1687
1688 new_skb = netdev_alloc_skb(bp->dev,
1689 len + pad);
1690 if (new_skb == NULL) {
1691 DP(NETIF_MSG_RX_ERR,
34f80b04 1692 "ERROR packet dropped "
a2fbb9ea 1693 "because of alloc failure\n");
de832a55 1694 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1695 goto reuse_rx;
1696 }
1697
1698 /* aligned copy */
1699 skb_copy_from_linear_data_offset(skb, pad,
1700 new_skb->data + pad, len);
1701 skb_reserve(new_skb, pad);
1702 skb_put(new_skb, len);
1703
1704 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1705
1706 skb = new_skb;
1707
a119a069
EG
1708 } else
1709 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1a983142
FT
1710 dma_unmap_single(&bp->pdev->dev,
1711 dma_unmap_addr(rx_buf, mapping),
437cf2f1 1712 bp->rx_buf_size,
1a983142 1713 DMA_FROM_DEVICE);
a2fbb9ea
ET
1714 skb_reserve(skb, pad);
1715 skb_put(skb, len);
1716
1717 } else {
1718 DP(NETIF_MSG_RX_ERR,
34f80b04 1719 "ERROR packet dropped because "
a2fbb9ea 1720 "of alloc failure\n");
de832a55 1721 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1722reuse_rx:
1723 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1724 goto next_rx;
1725 }
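			/* Copy-break note: when the MTU forces full-size rx
			 * buffers, small frames are copied into a freshly
			 * allocated small skb and the original buffer is
			 * recycled into the ring (bnx2x_reuse_rx_skb), which
			 * is cheaper than mapping a full-size replacement
			 * buffer for every small packet.
			 */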
1726
1727 skb->protocol = eth_type_trans(skb, bp->dev);
1728
4447957a 1729 if ((bp->dev->features & NETIF_F_RXHASH) &&
c68ed255
TH
1730 (cqe_fp_status_flags &
1731 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732 skb->rxhash = le32_to_cpu(
1733 cqe->fast_path_cqe.rss_hash_result);
1734
a2fbb9ea 1735 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1736 if (bp->rx_csum) {
1adcd8be
EG
1737 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1738 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1739 else
de832a55 1740 fp->eth_q_stats.hw_csum_err++;
66e855f3 1741 }
a2fbb9ea
ET
1742 }
1743
748e5439 1744 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1745
a2fbb9ea 1746#ifdef BCM_VLAN
0c6671b0 1747 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1748 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1749 PARSING_FLAGS_VLAN))
4fd89b7a
DK
1750 vlan_gro_receive(&fp->napi, bp->vlgrp,
1751 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
a2fbb9ea
ET
1752 else
1753#endif
4fd89b7a 1754 napi_gro_receive(&fp->napi, skb);
a2fbb9ea 1755
a2fbb9ea
ET
1756
1757next_rx:
1758 rx_buf->skb = NULL;
1759
1760 bd_cons = NEXT_RX_IDX(bd_cons);
1761 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1762 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1763 rx_pkt++;
a2fbb9ea
ET
1764next_cqe:
1765 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1766 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1767
34f80b04 1768 if (rx_pkt == budget)
a2fbb9ea
ET
1769 break;
1770 } /* while */
1771
1772 fp->rx_bd_cons = bd_cons;
34f80b04 1773 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1774 fp->rx_comp_cons = sw_comp_cons;
1775 fp->rx_comp_prod = sw_comp_prod;
1776
7a9b2557
VZ
1777 /* Update producers */
1778 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1779 fp->rx_sge_prod);
a2fbb9ea
ET
1780
1781 fp->rx_pkt += rx_pkt;
1782 fp->rx_calls++;
1783
1784 return rx_pkt;
1785}
1786
1787static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1788{
1789 struct bnx2x_fastpath *fp = fp_cookie;
1790 struct bnx2x *bp = fp->bp;
a2fbb9ea 1791
da5a662a
VZ
1792 /* Return here if interrupt is disabled */
1793 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1794 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1795 return IRQ_HANDLED;
1796 }
1797
34f80b04 1798 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1799 fp->index, fp->sb_id);
0626b899 1800 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1801
1802#ifdef BNX2X_STOP_ON_ERROR
1803 if (unlikely(bp->panic))
1804 return IRQ_HANDLED;
1805#endif
ca00392c 1806
54b9ddaa
VZ
1807 /* Handle Rx and Tx according to MSI-X vector */
1808 prefetch(fp->rx_cons_sb);
1809 prefetch(fp->tx_cons_sb);
1810 prefetch(&fp->status_blk->u_status_block.status_block_index);
1811 prefetch(&fp->status_blk->c_status_block.status_block_index);
1812 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1813
a2fbb9ea
ET
1814 return IRQ_HANDLED;
1815}
1816
1817static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1818{
555f6c78 1819 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1820 u16 status = bnx2x_ack_int(bp);
34f80b04 1821 u16 mask;
ca00392c 1822 int i;
a2fbb9ea 1823
34f80b04 1824 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1825 if (unlikely(status == 0)) {
1826 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1827 return IRQ_NONE;
1828 }
f5372251 1829 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1830
34f80b04 1831 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1832 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1833 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1834 return IRQ_HANDLED;
1835 }
1836
3196a88a
EG
1837#ifdef BNX2X_STOP_ON_ERROR
1838 if (unlikely(bp->panic))
1839 return IRQ_HANDLED;
1840#endif
1841
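	/* Note: the status word from bnx2x_ack_int() is a bitfield -
	 * bit 0 signals a default (slowpath) status block event, handled
	 * by queueing sp_task below, while bit (sb_id + 1), i.e. the
	 * 0x2 << sb_id mask, signals fastpath status block sb_id.
	 */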
ca00392c
EG
1842 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1843 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1844
ca00392c
EG
1845 mask = 0x2 << fp->sb_id;
1846 if (status & mask) {
54b9ddaa
VZ
1847 /* Handle Rx and Tx according to SB id */
1848 prefetch(fp->rx_cons_sb);
1849 prefetch(&fp->status_blk->u_status_block.
1850 status_block_index);
1851 prefetch(fp->tx_cons_sb);
1852 prefetch(&fp->status_blk->c_status_block.
1853 status_block_index);
1854 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1855 status &= ~mask;
1856 }
a2fbb9ea
ET
1857 }
1858
993ac7b5
MC
1859#ifdef BCM_CNIC
1860 mask = 0x2 << CNIC_SB_ID(bp);
1861 if (status & (mask | 0x1)) {
1862 struct cnic_ops *c_ops = NULL;
1863
1864 rcu_read_lock();
1865 c_ops = rcu_dereference(bp->cnic_ops);
1866 if (c_ops)
1867 c_ops->cnic_handler(bp->cnic_data, NULL);
1868 rcu_read_unlock();
1869
1870 status &= ~mask;
1871 }
1872#endif
a2fbb9ea 1873
34f80b04 1874 if (unlikely(status & 0x1)) {
1cf167f2 1875 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1876
1877 status &= ~0x1;
1878 if (!status)
1879 return IRQ_HANDLED;
1880 }
1881
cdaa7cb8
VZ
1882 if (unlikely(status))
1883 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1884 status);
a2fbb9ea 1885
c18487ee 1886 return IRQ_HANDLED;
a2fbb9ea
ET
1887}
1888
c18487ee 1889/* end of fast path */
a2fbb9ea 1890
bb2a0f7a 1891static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1892
c18487ee
YR
1893/* Link */
1894
1895/*
1896 * General service functions
1897 */
a2fbb9ea 1898
4a37fb66 1899static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1900{
1901 u32 lock_status;
1902 u32 resource_bit = (1 << resource);
4a37fb66
YG
1903 int func = BP_FUNC(bp);
1904 u32 hw_lock_control_reg;
c18487ee 1905 int cnt;
a2fbb9ea 1906
c18487ee
YR
1907 /* Validating that the resource is within range */
1908 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1909 DP(NETIF_MSG_HW,
1910 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1911 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1912 return -EINVAL;
1913 }
a2fbb9ea 1914
4a37fb66
YG
1915 if (func <= 5) {
1916 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1917 } else {
1918 hw_lock_control_reg =
1919 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1920 }
1921
c18487ee 1922 /* Validating that the resource is not already taken */
4a37fb66 1923 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1924 if (lock_status & resource_bit) {
1925 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1926 lock_status, resource_bit);
1927 return -EEXIST;
1928 }
a2fbb9ea 1929
46230476
EG
 1930	/* Try for 5 seconds, polling every 5ms */
1931 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1932 /* Try to acquire the lock */
4a37fb66
YG
1933 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1934 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1935 if (lock_status & resource_bit)
1936 return 0;
a2fbb9ea 1937
c18487ee 1938 msleep(5);
a2fbb9ea 1939 }
c18487ee
YR
1940 DP(NETIF_MSG_HW, "Timeout\n");
1941 return -EAGAIN;
1942}
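/* Lock protocol note: writing the resource bit to
 * (hw_lock_control_reg + 4) requests the lock and the chip grants it by
 * reflecting the bit on the next read of hw_lock_control_reg; writing
 * the bit to hw_lock_control_reg itself (see bnx2x_release_hw_lock()
 * below) releases it.
 */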
a2fbb9ea 1943
4a37fb66 1944static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1945{
1946 u32 lock_status;
1947 u32 resource_bit = (1 << resource);
4a37fb66
YG
1948 int func = BP_FUNC(bp);
1949 u32 hw_lock_control_reg;
a2fbb9ea 1950
72fd0718
VZ
1951 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1952
c18487ee
YR
1953 /* Validating that the resource is within range */
1954 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1955 DP(NETIF_MSG_HW,
1956 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1957 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1958 return -EINVAL;
1959 }
1960
4a37fb66
YG
1961 if (func <= 5) {
1962 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1963 } else {
1964 hw_lock_control_reg =
1965 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1966 }
1967
c18487ee 1968 /* Validating that the resource is currently taken */
4a37fb66 1969 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1970 if (!(lock_status & resource_bit)) {
1971 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1972 lock_status, resource_bit);
1973 return -EFAULT;
a2fbb9ea
ET
1974 }
1975
4a37fb66 1976 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1977 return 0;
1978}
1979
1980/* HW Lock for shared dual port PHYs */
4a37fb66 1981static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1982{
34f80b04 1983 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1984
46c6a674
EG
1985 if (bp->port.need_hw_lock)
1986 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1987}
a2fbb9ea 1988
4a37fb66 1989static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1990{
46c6a674
EG
1991 if (bp->port.need_hw_lock)
1992 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1993
34f80b04 1994 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1995}
a2fbb9ea 1996
4acac6a5
EG
1997int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1998{
1999 /* The GPIO should be swapped if swap register is set and active */
2000 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2001 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2002 int gpio_shift = gpio_num +
2003 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2004 u32 gpio_mask = (1 << gpio_shift);
2005 u32 gpio_reg;
2006 int value;
2007
2008 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2009 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2010 return -EINVAL;
2011 }
2012
2013 /* read GPIO value */
2014 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2015
2016 /* get the requested pin value */
2017 if ((gpio_reg & gpio_mask) == gpio_mask)
2018 value = 1;
2019 else
2020 value = 0;
2021
2022 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2023
2024 return value;
2025}
2026
17de50b7 2027int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2028{
2029 /* The GPIO should be swapped if swap register is set and active */
2030 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2031 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2032 int gpio_shift = gpio_num +
2033 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2034 u32 gpio_mask = (1 << gpio_shift);
2035 u32 gpio_reg;
a2fbb9ea 2036
c18487ee
YR
2037 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2038 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2039 return -EINVAL;
2040 }
a2fbb9ea 2041
4a37fb66 2042 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2043 /* read GPIO and mask except the float bits */
2044 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2045
c18487ee
YR
2046 switch (mode) {
2047 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2048 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2049 gpio_num, gpio_shift);
2050 /* clear FLOAT and set CLR */
2051 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2052 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2053 break;
a2fbb9ea 2054
c18487ee
YR
2055 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2056 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2057 gpio_num, gpio_shift);
2058 /* clear FLOAT and set SET */
2059 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2061 break;
a2fbb9ea 2062
17de50b7 2063 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2064 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2065 gpio_num, gpio_shift);
2066 /* set FLOAT */
2067 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2068 break;
a2fbb9ea 2069
c18487ee
YR
2070 default:
2071 break;
a2fbb9ea
ET
2072 }
2073
c18487ee 2074 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2076
c18487ee 2077 return 0;
a2fbb9ea
ET
2078}
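/* GPIO note: each pin is controlled via three bit positions in
 * MISC_REG_GPIO - FLOAT (tristate/input), SET and CLR (drive high or
 * low). The code above always clears FLOAT before driving a pin, and
 * sets FLOAT alone to return the pin to input mode.
 */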
2079
4acac6a5
EG
2080int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2081{
2082 /* The GPIO should be swapped if swap register is set and active */
2083 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2084 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2085 int gpio_shift = gpio_num +
2086 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2087 u32 gpio_mask = (1 << gpio_shift);
2088 u32 gpio_reg;
2089
2090 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2091 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2092 return -EINVAL;
2093 }
2094
2095 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2096 /* read GPIO int */
2097 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2098
2099 switch (mode) {
2100 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2101 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2102 "output low\n", gpio_num, gpio_shift);
2103 /* clear SET and set CLR */
2104 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2105 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2106 break;
2107
2108 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2109 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2110 "output high\n", gpio_num, gpio_shift);
2111 /* clear CLR and set SET */
2112 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2113 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2114 break;
2115
2116 default:
2117 break;
2118 }
2119
2120 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2121 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2122
2123 return 0;
2124}
2125
c18487ee 2126static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2127{
c18487ee
YR
2128 u32 spio_mask = (1 << spio_num);
2129 u32 spio_reg;
a2fbb9ea 2130
c18487ee
YR
2131 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2132 (spio_num > MISC_REGISTERS_SPIO_7)) {
2133 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2134 return -EINVAL;
a2fbb9ea
ET
2135 }
2136
4a37fb66 2137 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2138 /* read SPIO and mask except the float bits */
2139 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2140
c18487ee 2141 switch (mode) {
6378c025 2142 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2143 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2144 /* clear FLOAT and set CLR */
2145 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2146 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2147 break;
a2fbb9ea 2148
6378c025 2149 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2150 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2151 /* clear FLOAT and set SET */
2152 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2153 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2154 break;
a2fbb9ea 2155
c18487ee
YR
2156 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2157 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2158 /* set FLOAT */
2159 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160 break;
a2fbb9ea 2161
c18487ee
YR
2162 default:
2163 break;
a2fbb9ea
ET
2164 }
2165
c18487ee 2166 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2167 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2168
a2fbb9ea
ET
2169 return 0;
2170}
2171
c18487ee 2172static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2173{
ad33ea3a
EG
2174 switch (bp->link_vars.ieee_fc &
2175 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2176 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2177 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2178 ADVERTISED_Pause);
2179 break;
356e2385 2180
c18487ee 2181 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2182 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2183 ADVERTISED_Pause);
2184 break;
356e2385 2185
c18487ee 2186 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2187 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2188 break;
356e2385 2189
c18487ee 2190 default:
34f80b04 2191 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2192 ADVERTISED_Pause);
2193 break;
2194 }
2195}
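/* The switch above translates the pause mode negotiated in
 * link_vars.ieee_fc into the ADVERTISED_Pause/ADVERTISED_Asym_Pause
 * bits kept in bp->port.advertising, which ethtool later reports.
 */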
f1410647 2196
c18487ee
YR
2197static void bnx2x_link_report(struct bnx2x *bp)
2198{
f34d28ea 2199 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2200 netif_carrier_off(bp->dev);
7995c64e 2201 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2202 return;
2203 }
2204
c18487ee 2205 if (bp->link_vars.link_up) {
35c5f8fe
EG
2206 u16 line_speed;
2207
c18487ee
YR
2208 if (bp->state == BNX2X_STATE_OPEN)
2209 netif_carrier_on(bp->dev);
7995c64e 2210 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2211
35c5f8fe
EG
2212 line_speed = bp->link_vars.line_speed;
2213 if (IS_E1HMF(bp)) {
2214 u16 vn_max_rate;
2215
2216 vn_max_rate =
2217 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2218 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219 if (vn_max_rate < line_speed)
2220 line_speed = vn_max_rate;
2221 }
7995c64e 2222 pr_cont("%d Mbps ", line_speed);
f1410647 2223
c18487ee 2224 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2225 pr_cont("full duplex");
c18487ee 2226 else
7995c64e 2227 pr_cont("half duplex");
f1410647 2228
c0700f90
DM
2229 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2230 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2231 pr_cont(", receive ");
356e2385
EG
2232 if (bp->link_vars.flow_ctrl &
2233 BNX2X_FLOW_CTRL_TX)
7995c64e 2234 pr_cont("& transmit ");
c18487ee 2235 } else {
7995c64e 2236 pr_cont(", transmit ");
c18487ee 2237 }
7995c64e 2238 pr_cont("flow control ON");
c18487ee 2239 }
7995c64e 2240 pr_cont("\n");
f1410647 2241
c18487ee
YR
2242 } else { /* link_down */
2243 netif_carrier_off(bp->dev);
7995c64e 2244 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2245 }
c18487ee
YR
2246}
2247
b5bf9068 2248static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2249{
19680c48
EG
2250 if (!BP_NOMCP(bp)) {
2251 u8 rc;
a2fbb9ea 2252
19680c48 2253 /* Initialize link parameters structure variables */
8c99e7b0
YR
2254 /* It is recommended to turn off RX FC for jumbo frames
2255 for better performance */
0c593270 2256 if (bp->dev->mtu > 5000)
c0700f90 2257 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2258 else
c0700f90 2259 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2260
4a37fb66 2261 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2262
2263 if (load_mode == LOAD_DIAG)
2264 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2265
19680c48 2266 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2267
4a37fb66 2268 bnx2x_release_phy_lock(bp);
a2fbb9ea 2269
3c96c68b
EG
2270 bnx2x_calc_fc_adv(bp);
2271
b5bf9068
EG
2272 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2273 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2274 bnx2x_link_report(bp);
b5bf9068 2275 }
34f80b04 2276
19680c48
EG
2277 return rc;
2278 }
f5372251 2279	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
19680c48 2280 return -EINVAL;
a2fbb9ea
ET
2281}
2282
c18487ee 2283static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2284{
19680c48 2285 if (!BP_NOMCP(bp)) {
4a37fb66 2286 bnx2x_acquire_phy_lock(bp);
19680c48 2287 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2288 bnx2x_release_phy_lock(bp);
a2fbb9ea 2289
19680c48
EG
2290 bnx2x_calc_fc_adv(bp);
2291 } else
f5372251 2292		BNX2X_ERR("Bootcode is missing - cannot set link\n");
c18487ee 2293}
a2fbb9ea 2294
c18487ee
YR
2295static void bnx2x__link_reset(struct bnx2x *bp)
2296{
19680c48 2297 if (!BP_NOMCP(bp)) {
4a37fb66 2298 bnx2x_acquire_phy_lock(bp);
589abe3a 2299 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2300 bnx2x_release_phy_lock(bp);
19680c48 2301 } else
f5372251 2302		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
c18487ee 2303}
a2fbb9ea 2304
c18487ee
YR
2305static u8 bnx2x_link_test(struct bnx2x *bp)
2306{
2145a920 2307 u8 rc = 0;
a2fbb9ea 2308
2145a920
VZ
2309 if (!BP_NOMCP(bp)) {
2310 bnx2x_acquire_phy_lock(bp);
2311 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2312 bnx2x_release_phy_lock(bp);
2313 } else
 2314		BNX2X_ERR("Bootcode is missing - cannot test link\n");
a2fbb9ea 2315
c18487ee
YR
2316 return rc;
2317}
a2fbb9ea 2318
8a1c38d1 2319static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2320{
8a1c38d1
EG
2321 u32 r_param = bp->link_vars.line_speed / 8;
2322 u32 fair_periodic_timeout_usec;
2323 u32 t_fair;
34f80b04 2324
8a1c38d1
EG
2325 memset(&(bp->cmng.rs_vars), 0,
2326 sizeof(struct rate_shaping_vars_per_port));
2327 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2328
8a1c38d1
EG
2329 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2330 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2331
8a1c38d1
EG
 2332	/* this is the threshold below which no timer arming will occur;
 2333	   the 1.25 coefficient makes the threshold a little bigger
 2334	   than the real time, to compensate for timer inaccuracy */
2335 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2336 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2337
8a1c38d1
EG
2338 /* resolution of fairness timer */
2339 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2340 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2341 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2342
8a1c38d1
EG
2343 /* this is the threshold below which we won't arm the timer anymore */
2344 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2345
8a1c38d1
EG
 2346	/* we multiply by 1e3/8 to get bytes/msec.
 2347	   We don't want the credits to exceed
 2348	   t_fair*FAIR_MEM (the algorithm resolution) */
2349 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2350 /* since each tick is 4 usec */
2351 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2352}
2353
2691d51d
EG
2354/* Calculates the sum of vn_min_rates.
2355 It's needed for further normalizing of the min_rates.
2356 Returns:
2357 sum of vn_min_rates.
2358 or
2359 0 - if all the min_rates are 0.
 2360   In the latter case the fairness algorithm should be deactivated.
2361 If not all min_rates are zero then those that are zeroes will be set to 1.
2362 */
2363static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2364{
2365 int all_zero = 1;
2366 int port = BP_PORT(bp);
2367 int vn;
2368
2369 bp->vn_weight_sum = 0;
2370 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2371 int func = 2*vn + port;
2372 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2373 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2374 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2375
2376 /* Skip hidden vns */
2377 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2378 continue;
2379
2380 /* If min rate is zero - set it to 1 */
2381 if (!vn_min_rate)
2382 vn_min_rate = DEF_MIN_RATE;
2383 else
2384 all_zero = 0;
2385
2386 bp->vn_weight_sum += vn_min_rate;
2387 }
2388
2389 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2390 if (all_zero) {
2391 bp->cmng.flags.cmng_enables &=
2392 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 2393		DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2394 " fairness will be disabled\n");
2395 } else
2396 bp->cmng.flags.cmng_enables |=
2397 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2398}
2399
8a1c38d1 2400static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2401{
2402 struct rate_shaping_vars_per_vn m_rs_vn;
2403 struct fairness_vars_per_vn m_fair_vn;
2404 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2405 u16 vn_min_rate, vn_max_rate;
2406 int i;
2407
2408 /* If function is hidden - set min and max to zeroes */
2409 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2410 vn_min_rate = 0;
2411 vn_max_rate = 0;
2412
2413 } else {
2414 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2415 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2416 /* If min rate is zero - set it to 1 */
2417 if (!vn_min_rate)
34f80b04
EG
2418 vn_min_rate = DEF_MIN_RATE;
2419 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2420 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2421 }
8a1c38d1 2422 DP(NETIF_MSG_IFUP,
b015e3d1 2423 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2424 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2425
2426 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2427 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2428
2429 /* global vn counter - maximal Mbps for this vn */
2430 m_rs_vn.vn_counter.rate = vn_max_rate;
2431
2432 /* quota - number of bytes transmitted in this period */
2433 m_rs_vn.vn_counter.quota =
2434 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2435
8a1c38d1 2436 if (bp->vn_weight_sum) {
34f80b04
EG
2437 /* credit for each period of the fairness algorithm:
2438 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2439 vn_weight_sum should not be larger than 10000, thus
2440 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2441 than zero */
34f80b04 2442 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2443 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2444 (8 * bp->vn_weight_sum))),
2445 (bp->cmng.fair_vars.fair_threshold * 2));
2446 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2447 m_fair_vn.vn_credit_delta);
2448 }
2449
34f80b04
EG
2450 /* Store it to internal memory */
2451 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_rs_vn))[i]);
2455
2456 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2457 REG_WR(bp, BAR_XSTRORM_INTMEM +
2458 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2459 ((u32 *)(&m_fair_vn))[i]);
2460}
2461
8a1c38d1 2462
c18487ee
YR
2463/* This function is called upon link interrupt */
2464static void bnx2x_link_attn(struct bnx2x *bp)
2465{
d9e8b185 2466 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2467 /* Make sure that we are synced with the current statistics */
2468 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2469
c18487ee 2470 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2471
bb2a0f7a
YG
2472 if (bp->link_vars.link_up) {
2473
1c06328c 2474 /* dropless flow control */
a18f5128 2475 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2476 int port = BP_PORT(bp);
2477 u32 pause_enabled = 0;
2478
2479 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2480 pause_enabled = 1;
2481
2482 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2483 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2484 pause_enabled);
2485 }
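		/* Dropless FC note: the USTORM pause_enabled flag written
		 * above tells the firmware whether TX pause was negotiated,
		 * so it can assert pause frames before the rx ring overflows
		 * instead of dropping packets.
		 */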
2486
bb2a0f7a
YG
2487 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2488 struct host_port_stats *pstats;
2489
2490 pstats = bnx2x_sp(bp, port_stats);
2491 /* reset old bmac stats */
2492 memset(&(pstats->mac_stx[0]), 0,
2493 sizeof(struct mac_stx));
2494 }
f34d28ea 2495 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2496 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2497 }
2498
d9e8b185
VZ
2499 /* indicate link status only if link status actually changed */
2500 if (prev_link_status != bp->link_vars.link_status)
2501 bnx2x_link_report(bp);
34f80b04
EG
2502
2503 if (IS_E1HMF(bp)) {
8a1c38d1 2504 int port = BP_PORT(bp);
34f80b04 2505 int func;
8a1c38d1 2506 int vn;
34f80b04 2507
ab6ad5a4 2508 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2509 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2510 if (vn == BP_E1HVN(bp))
2511 continue;
2512
8a1c38d1 2513 func = ((vn << 1) | port);
34f80b04
EG
2514 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2515 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2516 }
34f80b04 2517
8a1c38d1
EG
2518 if (bp->link_vars.link_up) {
2519 int i;
2520
2521 /* Init rate shaping and fairness contexts */
2522 bnx2x_init_port_minmax(bp);
34f80b04 2523
34f80b04 2524 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2525 bnx2x_init_vn_minmax(bp, 2*vn + port);
2526
2527 /* Store it to internal memory */
2528 for (i = 0;
2529 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2530 REG_WR(bp, BAR_XSTRORM_INTMEM +
2531 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2532 ((u32 *)(&bp->cmng))[i]);
2533 }
34f80b04 2534 }
c18487ee 2535}
a2fbb9ea 2536
c18487ee
YR
2537static void bnx2x__link_status_update(struct bnx2x *bp)
2538{
f34d28ea 2539 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2540 return;
a2fbb9ea 2541
c18487ee 2542 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2543
bb2a0f7a
YG
2544 if (bp->link_vars.link_up)
2545 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2546 else
2547 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2548
2691d51d
EG
2549 bnx2x_calc_vn_weight_sum(bp);
2550
c18487ee
YR
2551 /* indicate link status */
2552 bnx2x_link_report(bp);
a2fbb9ea 2553}
a2fbb9ea 2554
34f80b04
EG
2555static void bnx2x_pmf_update(struct bnx2x *bp)
2556{
2557 int port = BP_PORT(bp);
2558 u32 val;
2559
2560 bp->port.pmf = 1;
2561 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2562
2563 /* enable nig attention */
2564 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2565 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2566 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2567
2568 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2569}
2570
c18487ee 2571/* end of Link */
a2fbb9ea
ET
2572
2573/* slow path */
2574
2575/*
2576 * General service functions
2577 */
2578
2691d51d
EG
2579/* send the MCP a request, block until there is a reply */
2580u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2581{
2582 int func = BP_FUNC(bp);
2583 u32 seq = ++bp->fw_seq;
2584 u32 rc = 0;
2585 u32 cnt = 1;
2586 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2587
c4ff7cbf 2588 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2589 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2590 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2591
2592 do {
 2593		/* let the FW do its magic ... */
2594 msleep(delay);
2595
2596 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2597
c4ff7cbf
EG
 2598	/* Give the FW up to 5 seconds (500*10ms) */
2599 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2600
2601 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2602 cnt*delay, rc, seq);
2603
2604 /* is this a reply to our command? */
2605 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2606 rc &= FW_MSG_CODE_MASK;
2607 else {
2608 /* FW BUG! */
2609 BNX2X_ERR("FW failed to respond!\n");
2610 bnx2x_fw_dump(bp);
2611 rc = 0;
2612 }
c4ff7cbf 2613 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2614
2615 return rc;
2616}
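/* Mailbox handshake note: the driver writes (command | seq) to the
 * function's drv_mb_header and polls fw_mb_header until the firmware
 * echoes the same sequence number; the FW_MSG_CODE_MASK bits of the
 * echoed word carry the response code, and a sequence mismatch after
 * the timeout is treated as a firmware failure.
 */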
2617
e665bfda 2618static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2619static void bnx2x_set_rx_mode(struct net_device *dev);
2620
2621static void bnx2x_e1h_disable(struct bnx2x *bp)
2622{
2623 int port = BP_PORT(bp);
2691d51d
EG
2624
2625 netif_tx_disable(bp->dev);
2691d51d
EG
2626
2627 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2628
2691d51d
EG
2629 netif_carrier_off(bp->dev);
2630}
2631
2632static void bnx2x_e1h_enable(struct bnx2x *bp)
2633{
2634 int port = BP_PORT(bp);
2635
2636 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2637
2691d51d
EG
 2638	/* Tx queues should only be re-enabled */
2639 netif_tx_wake_all_queues(bp->dev);
2640
061bc702
EG
2641 /*
 2642	 * Do not call netif_carrier_on here; it will be called during the
 2643	 * link state check if the link is up
2644 */
2691d51d
EG
2645}
2646
2647static void bnx2x_update_min_max(struct bnx2x *bp)
2648{
2649 int port = BP_PORT(bp);
2650 int vn, i;
2651
2652 /* Init rate shaping and fairness contexts */
2653 bnx2x_init_port_minmax(bp);
2654
2655 bnx2x_calc_vn_weight_sum(bp);
2656
2657 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2658 bnx2x_init_vn_minmax(bp, 2*vn + port);
2659
2660 if (bp->port.pmf) {
2661 int func;
2662
2663 /* Set the attention towards other drivers on the same port */
2664 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2665 if (vn == BP_E1HVN(bp))
2666 continue;
2667
2668 func = ((vn << 1) | port);
2669 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2670 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2671 }
2672
2673 /* Store it to internal memory */
2674 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2675 REG_WR(bp, BAR_XSTRORM_INTMEM +
2676 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2677 ((u32 *)(&bp->cmng))[i]);
2678 }
2679}
2680
2681static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2682{
2691d51d 2683 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2684
2685 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2686
f34d28ea
EG
2687 /*
2688 * This is the only place besides the function initialization
2689 * where the bp->flags can change so it is done without any
2690 * locks
2691 */
2691d51d
EG
2692 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2693 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2694 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2695
2696 bnx2x_e1h_disable(bp);
2697 } else {
2698 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2699 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2700
2701 bnx2x_e1h_enable(bp);
2702 }
2703 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2704 }
2705 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2706
2707 bnx2x_update_min_max(bp);
2708 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2709 }
2710
2711 /* Report results to MCP */
2712 if (dcc_event)
2713 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2714 else
2715 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2716}
2717
28912902
MC
2718/* must be called under the spq lock */
2719static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2720{
2721 struct eth_spe *next_spe = bp->spq_prod_bd;
2722
2723 if (bp->spq_prod_bd == bp->spq_last_bd) {
2724 bp->spq_prod_bd = bp->spq;
2725 bp->spq_prod_idx = 0;
2726 DP(NETIF_MSG_TIMER, "end of spq\n");
2727 } else {
2728 bp->spq_prod_bd++;
2729 bp->spq_prod_idx++;
2730 }
2731 return next_spe;
2732}
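/* SPQ note: the slowpath queue is a circular producer ring - when the
 * producer BD reaches spq_last_bd it wraps back to the ring base
 * (bp->spq) and the producer index restarts from zero; both helpers
 * must therefore run under bp->spq_lock.
 */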
2733
2734/* must be called under the spq lock */
2735static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2736{
2737 int func = BP_FUNC(bp);
2738
2739 /* Make sure that BD data is updated before writing the producer */
2740 wmb();
2741
2742 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2743 bp->spq_prod_idx);
2744 mmiowb();
2745}
2746
a2fbb9ea
ET
2747/* the slow path queue is odd since completions arrive on the fastpath ring */
2748static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2749 u32 data_hi, u32 data_lo, int common)
2750{
28912902 2751 struct eth_spe *spe;
a2fbb9ea 2752
a2fbb9ea
ET
2753#ifdef BNX2X_STOP_ON_ERROR
2754 if (unlikely(bp->panic))
2755 return -EIO;
2756#endif
2757
34f80b04 2758 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2759
2760 if (!bp->spq_left) {
2761 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2762 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2763 bnx2x_panic();
2764 return -EBUSY;
2765 }
f1410647 2766
28912902
MC
2767 spe = bnx2x_sp_get_next(bp);
2768
a2fbb9ea 2769	/* CID needs the port number to be encoded in it */
28912902 2770 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2771 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2772 HW_CID(bp, cid));
28912902 2773 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2774 if (common)
28912902 2775 spe->hdr.type |=
a2fbb9ea
ET
2776 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2777
28912902
MC
2778 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2779 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2780
2781 bp->spq_left--;
2782
cdaa7cb8
VZ
2783 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2784 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2785 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786 (u32)(U64_LO(bp->spq_mapping) +
2787 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2789
28912902 2790 bnx2x_sp_prod_update(bp);
34f80b04 2791 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2792 return 0;
2793}
2794
2795/* acquire split MCP access lock register */
4a37fb66 2796static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2797{
72fd0718 2798 u32 j, val;
34f80b04 2799 int rc = 0;
a2fbb9ea
ET
2800
2801 might_sleep();
72fd0718 2802 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2803 val = (1UL << 31);
2804 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2805 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2806 if (val & (1L << 31))
2807 break;
2808
2809 msleep(5);
2810 }
a2fbb9ea 2811 if (!(val & (1L << 31))) {
19680c48 2812 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2813 rc = -EBUSY;
2814 }
2815
2816 return rc;
2817}
2818
4a37fb66
YG
2819/* release split MCP access lock register */
2820static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2821{
72fd0718 2822 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2823}
2824
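/* The return value of bnx2x_update_dsb_idx() below is a bitmask of
 * which default status block indices advanced since the last scan:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM. A non-zero result tells the slowpath
 * task which sub-handlers need to run.
 */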
2825static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2826{
2827 struct host_def_status_block *def_sb = bp->def_status_blk;
2828 u16 rc = 0;
2829
2830 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2831 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2832 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2833 rc |= 1;
2834 }
2835 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2836 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2837 rc |= 2;
2838 }
2839 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2840 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2841 rc |= 4;
2842 }
2843 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2844 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2845 rc |= 8;
2846 }
2847 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2848 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2849 rc |= 16;
2850 }
2851 return rc;
2852}
2853
2854/*
2855 * slow path service functions
2856 */
2857
2858static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2859{
34f80b04 2860 int port = BP_PORT(bp);
5c862848
EG
2861 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2862 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2863 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2864 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2865 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2866 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2867 u32 aeu_mask;
87942b46 2868 u32 nig_mask = 0;
a2fbb9ea 2869
a2fbb9ea
ET
2870 if (bp->attn_state & asserted)
2871 BNX2X_ERR("IGU ERROR\n");
2872
3fcaf2e5
EG
2873 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874 aeu_mask = REG_RD(bp, aeu_addr);
2875
a2fbb9ea 2876 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2877 aeu_mask, asserted);
72fd0718 2878 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2879 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2880
3fcaf2e5
EG
2881 REG_WR(bp, aeu_addr, aeu_mask);
2882 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2883
3fcaf2e5 2884 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2885 bp->attn_state |= asserted;
3fcaf2e5 2886 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2887
2888 if (asserted & ATTN_HARD_WIRED_MASK) {
2889 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2890
a5e9a7cf
EG
2891 bnx2x_acquire_phy_lock(bp);
2892
877e9aa4 2893 /* save nig interrupt mask */
87942b46 2894 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2895 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2896
c18487ee 2897 bnx2x_link_attn(bp);
a2fbb9ea
ET
2898
2899 /* handle unicore attn? */
2900 }
2901 if (asserted & ATTN_SW_TIMER_4_FUNC)
2902 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2903
2904 if (asserted & GPIO_2_FUNC)
2905 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2906
2907 if (asserted & GPIO_3_FUNC)
2908 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2909
2910 if (asserted & GPIO_4_FUNC)
2911 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2912
2913 if (port == 0) {
2914 if (asserted & ATTN_GENERAL_ATTN_1) {
2915 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2916 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2917 }
2918 if (asserted & ATTN_GENERAL_ATTN_2) {
2919 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2921 }
2922 if (asserted & ATTN_GENERAL_ATTN_3) {
2923 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2924 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2925 }
2926 } else {
2927 if (asserted & ATTN_GENERAL_ATTN_4) {
2928 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2929 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2930 }
2931 if (asserted & ATTN_GENERAL_ATTN_5) {
2932 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2933 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2934 }
2935 if (asserted & ATTN_GENERAL_ATTN_6) {
2936 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2937 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2938 }
2939 }
2940
2941 } /* if hardwired */
2942
5c862848
EG
2943 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2944 asserted, hc_addr);
2945 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2946
2947 /* now set back the mask */
a5e9a7cf 2948 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2949 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2950 bnx2x_release_phy_lock(bp);
2951 }
a2fbb9ea
ET
2952}
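/* Assert path note: newly asserted bits are first masked out of the
 * AEU enable register (so they cannot fire again until deasserted),
 * recorded in bp->attn_state, and finally written to the HC
 * COMMAND_REG_ATTN_BITS_SET command register.
 */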
2953
fd4ef40d
EG
2954static inline void bnx2x_fan_failure(struct bnx2x *bp)
2955{
2956 int port = BP_PORT(bp);
2957
2958 /* mark the failure */
2959 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2960 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2961 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2962 bp->link_params.ext_phy_config);
2963
2964 /* log the failure */
cdaa7cb8
VZ
2965 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2966 " the driver to shutdown the card to prevent permanent"
2967 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2968}
ab6ad5a4 2969
877e9aa4 2970static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2971{
34f80b04 2972 int port = BP_PORT(bp);
877e9aa4 2973 int reg_offset;
4d295db0 2974 u32 val, swap_val, swap_override;
877e9aa4 2975
34f80b04
EG
2976 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2977 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2978
34f80b04 2979 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2980
2981 val = REG_RD(bp, reg_offset);
2982 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2983 REG_WR(bp, reg_offset, val);
2984
2985 BNX2X_ERR("SPIO5 hw attention\n");
2986
fd4ef40d 2987 /* Fan failure attention */
35b19ba5
EG
2988 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2989 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2990 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2991 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2992 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2993 /* The PHY reset is controlled by GPIO 1 */
2994 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2995 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2996 break;
2997
4d295db0
EG
2998 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2999 /* The PHY reset is controlled by GPIO 1 */
3000 /* fake the port number to cancel the swap done in
3001 set_gpio() */
3002 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3003 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3004 port = (swap_val && swap_override) ^ 1;
3005 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3006 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007 break;
3008
877e9aa4
ET
3009 default:
3010 break;
3011 }
fd4ef40d 3012 bnx2x_fan_failure(bp);
877e9aa4 3013 }
34f80b04 3014
589abe3a
EG
3015 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3016 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3017 bnx2x_acquire_phy_lock(bp);
3018 bnx2x_handle_module_detect_int(&bp->link_params);
3019 bnx2x_release_phy_lock(bp);
3020 }
3021
34f80b04
EG
3022 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3023
3024 val = REG_RD(bp, reg_offset);
3025 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3026 REG_WR(bp, reg_offset, val);
3027
3028 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3029 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3030 bnx2x_panic();
3031 }
877e9aa4
ET
3032}
3033
3034static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3035{
3036 u32 val;
3037
0626b899 3038 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3039
3040 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3041 BNX2X_ERR("DB hw attention 0x%x\n", val);
3042 /* DORQ discard attention */
3043 if (val & 0x2)
3044 BNX2X_ERR("FATAL error from DORQ\n");
3045 }
34f80b04
EG
3046
3047 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3048
3049 int port = BP_PORT(bp);
3050 int reg_offset;
3051
3052 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3053 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3054
3055 val = REG_RD(bp, reg_offset);
3056 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3057 REG_WR(bp, reg_offset, val);
3058
3059 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3060 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3061 bnx2x_panic();
3062 }
877e9aa4
ET
3063}
3064
3065static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3066{
3067 u32 val;
3068
3069 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3070
3071 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3072 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3073 /* CFC error attention */
3074 if (val & 0x2)
3075 BNX2X_ERR("FATAL error from CFC\n");
3076 }
3077
3078 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3079
3080 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3081 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3082 /* RQ_USDMDP_FIFO_OVERFLOW */
3083 if (val & 0x18000)
3084 BNX2X_ERR("FATAL error from PXP\n");
3085 }
34f80b04
EG
3086
3087 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3088
3089 int port = BP_PORT(bp);
3090 int reg_offset;
3091
3092 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3093 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3094
3095 val = REG_RD(bp, reg_offset);
3096 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3097 REG_WR(bp, reg_offset, val);
3098
3099 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3100 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3101 bnx2x_panic();
3102 }
877e9aa4
ET
3103}
3104
3105static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3106{
34f80b04
EG
3107 u32 val;
3108
877e9aa4
ET
3109 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3110
34f80b04
EG
3111 if (attn & BNX2X_PMF_LINK_ASSERT) {
3112 int func = BP_FUNC(bp);
3113
3114 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3115 bp->mf_config = SHMEM_RD(bp,
3116 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3117 val = SHMEM_RD(bp, func_mb[func].drv_status);
3118 if (val & DRV_STATUS_DCC_EVENT_MASK)
3119 bnx2x_dcc_event(bp,
3120 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3121 bnx2x__link_status_update(bp);
2691d51d 3122 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3123 bnx2x_pmf_update(bp);
3124
3125 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3126
3127 BNX2X_ERR("MC assert!\n");
3128 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3129 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3132 bnx2x_panic();
3133
3134 } else if (attn & BNX2X_MCP_ASSERT) {
3135
3136 BNX2X_ERR("MCP assert!\n");
3137 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3138 bnx2x_fw_dump(bp);
877e9aa4
ET
3139
3140 } else
3141 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3142 }
3143
3144 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3145 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3146 if (attn & BNX2X_GRC_TIMEOUT) {
3147 val = CHIP_IS_E1H(bp) ?
3148 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3149 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3150 }
3151 if (attn & BNX2X_GRC_RSV) {
3152 val = CHIP_IS_E1H(bp) ?
3153 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3154 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3155 }
877e9aa4 3156 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3157 }
3158}
3159
72fd0718
VZ
3160static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3162
3163
3164#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3165#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3166#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3168#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3169#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
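/* BNX2X_MISC_GEN_REG is shared by all functions driving the chip:
 * bits [15:0] hold the load counter (the number of functions currently
 * loaded) and bit 16 is a reset-in-progress flag - set while a recovery
 * reset is being performed, cleared once it is done.
 */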
3170/*
3171 * should be run under rtnl lock
3172 */
3173static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174{
3175 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178 barrier();
3179 mmiowb();
3180}
3181
3182/*
3183 * should be run under rtnl lock
3184 */
3185static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186{
3187 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 3188	val |= (1 << RESET_DONE_FLAG_SHIFT);
3189 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190 barrier();
3191 mmiowb();
3192}
3193
3194/*
3195 * should be run under rtnl lock
3196 */
3197static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3198{
3199 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
 3201	return !(val & RESET_DONE_FLAG_MASK);
3202}
3203
3204/*
3205 * should be run under rtnl lock
3206 */
3207static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208{
3209 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215 barrier();
3216 mmiowb();
3217}
3218
3219/*
3220 * should be run under rtnl lock
3221 */
3222static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223{
3224 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230 barrier();
3231 mmiowb();
3232
3233 return val1;
3234}
3235
3236/*
3237 * should be run under rtnl lock
3238 */
3239static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240{
3241 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242}
3243
3244static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245{
3246 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248}
3249
3250static inline void _print_next_block(int idx, const char *blk)
3251{
3252 if (idx)
3253 pr_cont(", ");
3254 pr_cont("%s", blk);
3255}
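/* _print_next_block() uses idx only to decide whether a comma separator
 * is needed, so the print_blocks_with_parityN() helpers below thread
 * par_num through their calls to build one comma-separated list of
 * faulting blocks across all four attention signatures.
 */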
3256
3257static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258{
3259 int i = 0;
3260 u32 cur_bit = 0;
3261 for (i = 0; sig; i++) {
3262 cur_bit = ((u32)0x1 << i);
3263 if (sig & cur_bit) {
3264 switch (cur_bit) {
3265 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266 _print_next_block(par_num++, "BRB");
3267 break;
3268 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269 _print_next_block(par_num++, "PARSER");
3270 break;
3271 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272 _print_next_block(par_num++, "TSDM");
3273 break;
3274 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275 _print_next_block(par_num++, "SEARCHER");
3276 break;
3277 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278 _print_next_block(par_num++, "TSEMI");
3279 break;
3280 }
3281
3282 /* Clear the bit */
3283 sig &= ~cur_bit;
3284 }
3285 }
3286
3287 return par_num;
3288}
3289
3290static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291{
3292 int i = 0;
3293 u32 cur_bit = 0;
3294 for (i = 0; sig; i++) {
3295 cur_bit = ((u32)0x1 << i);
3296 if (sig & cur_bit) {
3297 switch (cur_bit) {
3298 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299 _print_next_block(par_num++, "PBCLIENT");
3300 break;
3301 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302 _print_next_block(par_num++, "QM");
3303 break;
3304 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305 _print_next_block(par_num++, "XSDM");
3306 break;
3307 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308 _print_next_block(par_num++, "XSEMI");
3309 break;
3310 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311 _print_next_block(par_num++, "DOORBELLQ");
3312 break;
3313 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314 _print_next_block(par_num++, "VAUX PCI CORE");
3315 break;
3316 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317 _print_next_block(par_num++, "DEBUG");
3318 break;
3319 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320 _print_next_block(par_num++, "USDM");
3321 break;
3322 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323 _print_next_block(par_num++, "USEMI");
3324 break;
3325 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326 _print_next_block(par_num++, "UPB");
3327 break;
3328 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329 _print_next_block(par_num++, "CSDM");
3330 break;
3331 }
3332
3333 /* Clear the bit */
3334 sig &= ~cur_bit;
3335 }
3336 }
3337
3338 return par_num;
3339}
3340
3341static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342{
3343 int i = 0;
3344 u32 cur_bit = 0;
3345 for (i = 0; sig; i++) {
3346 cur_bit = ((u32)0x1 << i);
3347 if (sig & cur_bit) {
3348 switch (cur_bit) {
3349 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350 _print_next_block(par_num++, "CSEMI");
3351 break;
3352 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353 _print_next_block(par_num++, "PXP");
3354 break;
3355 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356 _print_next_block(par_num++,
3357 "PXPPCICLOCKCLIENT");
3358 break;
3359 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360 _print_next_block(par_num++, "CFC");
3361 break;
3362 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363 _print_next_block(par_num++, "CDU");
3364 break;
3365 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366 _print_next_block(par_num++, "IGU");
3367 break;
3368 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369 _print_next_block(par_num++, "MISC");
3370 break;
3371 }
3372
3373 /* Clear the bit */
3374 sig &= ~cur_bit;
3375 }
3376 }
3377
3378 return par_num;
3379}
3380
3381static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382{
3383 int i = 0;
3384 u32 cur_bit = 0;
3385 for (i = 0; sig; i++) {
3386 cur_bit = ((u32)0x1 << i);
3387 if (sig & cur_bit) {
3388 switch (cur_bit) {
3389 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390 _print_next_block(par_num++, "MCP ROM");
3391 break;
3392 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393 _print_next_block(par_num++, "MCP UMP RX");
3394 break;
3395 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396 _print_next_block(par_num++, "MCP UMP TX");
3397 break;
3398 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399 _print_next_block(par_num++, "MCP SCPAD");
3400 break;
3401 }
3402
3403 /* Clear the bit */
3404 sig &= ~cur_bit;
3405 }
3406 }
3407
3408 return par_num;
3409}
3410
3411static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412 u32 sig2, u32 sig3)
3413{
3414 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416 int par_num = 0;
3417 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3418 "[0]:0x%08x [1]:0x%08x "
3419 "[2]:0x%08x [3]:0x%08x\n",
3420 sig0 & HW_PRTY_ASSERT_SET_0,
3421 sig1 & HW_PRTY_ASSERT_SET_1,
3422 sig2 & HW_PRTY_ASSERT_SET_2,
3423 sig3 & HW_PRTY_ASSERT_SET_3);
3424 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3425 bp->dev->name);
3426 par_num = bnx2x_print_blocks_with_parity0(
3427 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428 par_num = bnx2x_print_blocks_with_parity1(
3429 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430 par_num = bnx2x_print_blocks_with_parity2(
3431 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432 par_num = bnx2x_print_blocks_with_parity3(
3433 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434 printk("\n");
3435 return true;
3436 } else
3437 return false;
3438}
3439
3440static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3441{
a2fbb9ea 3442 struct attn_route attn;
72fd0718
VZ
3443 int port = BP_PORT(bp);
3444
3445 attn.sig[0] = REG_RD(bp,
3446 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3447 port*4);
3448 attn.sig[1] = REG_RD(bp,
3449 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3450 port*4);
3451 attn.sig[2] = REG_RD(bp,
3452 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3453 port*4);
3454 attn.sig[3] = REG_RD(bp,
3455 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3456 port*4);
3457
3458 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3459 attn.sig[3]);
3460}
3461
3462static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3463{
3464 struct attn_route attn, *group_mask;
34f80b04 3465 int port = BP_PORT(bp);
877e9aa4 3466 int index;
3467 u32 reg_addr;
3468 u32 val;
3fcaf2e5 3469 u32 aeu_mask;
3470
3471 /* need to take HW lock because MCP or other port might also
3472 try to handle this event */
4a37fb66 3473 bnx2x_acquire_alr(bp);
a2fbb9ea 3474
3475 if (bnx2x_chk_parity_attn(bp)) {
3476 bp->recovery_state = BNX2X_RECOVERY_INIT;
3477 bnx2x_set_reset_in_progress(bp);
3478 schedule_delayed_work(&bp->reset_task, 0);
3479 /* Disable HW interrupts */
3480 bnx2x_int_disable(bp);
3481 bnx2x_release_alr(bp);
3482 /* In case of parity errors don't handle attentions so that
 3483	 * the other function can also "see" the parity errors.
3484 */
3485 return;
3486 }
3487
3488 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3489 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3490 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3491 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3492 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3493 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3494
3495 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3496 if (deasserted & (1 << index)) {
72fd0718 3497 group_mask = &bp->attn_group[index];
a2fbb9ea 3498
34f80b04 3499 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3500 index, group_mask->sig[0], group_mask->sig[1],
3501 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 3502
877e9aa4 3503 bnx2x_attn_int_deasserted3(bp,
72fd0718 3504 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3505 bnx2x_attn_int_deasserted1(bp,
72fd0718 3506 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3507 bnx2x_attn_int_deasserted2(bp,
72fd0718 3508 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3509 bnx2x_attn_int_deasserted0(bp,
72fd0718 3510 attn.sig[0] & group_mask->sig[0]);
3511 }
3512 }
3513
4a37fb66 3514 bnx2x_release_alr(bp);
a2fbb9ea 3515
5c862848 3516 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3517
3518 val = ~deasserted;
3519 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3520 val, reg_addr);
5c862848 3521 REG_WR(bp, reg_addr, val);
a2fbb9ea 3522
a2fbb9ea 3523 if (~bp->attn_state & deasserted)
3fcaf2e5 3524 BNX2X_ERR("IGU ERROR\n");
3525
3526 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3527 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3528
3529 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3530 aeu_mask = REG_RD(bp, reg_addr);
3531
3532 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3533 aeu_mask, deasserted);
72fd0718 3534 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3535 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3536
3537 REG_WR(bp, reg_addr, aeu_mask);
3538 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3539
3540 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3541 bp->attn_state &= ~deasserted;
3542 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3543}
3544
3545static void bnx2x_attn_int(struct bnx2x *bp)
3546{
3547 /* read local copy of bits */
3548 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3549 attn_bits);
3550 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3551 attn_bits_ack);
3552 u32 attn_state = bp->attn_state;
3553
3554 /* look for changed bits */
3555 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3556 u32 deasserted = ~attn_bits & attn_ack & attn_state;
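	/* A bit is newly "asserted" when it is set in attn_bits but not yet
	 * acknowledged or tracked in attn_state; it is "deasserted" once it
	 * clears in attn_bits while still acknowledged and tracked. E.g.
	 * attn_bits 0x1, attn_ack 0x0, attn_state 0x0 yields asserted 0x1
	 * and deasserted 0x0.
	 */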
3557
3558 DP(NETIF_MSG_HW,
3559 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3560 attn_bits, attn_ack, asserted, deasserted);
3561
3562 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3563 BNX2X_ERR("BAD attention state\n");
3564
3565 /* handle bits that were raised */
3566 if (asserted)
3567 bnx2x_attn_int_asserted(bp, asserted);
3568
3569 if (deasserted)
3570 bnx2x_attn_int_deasserted(bp, deasserted);
3571}
3572
3573static void bnx2x_sp_task(struct work_struct *work)
3574{
1cf167f2 3575 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3576 u16 status;
3577
3578 /* Return here if interrupt is disabled */
3579 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3580 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3581 return;
3582 }
3583
3584 status = bnx2x_update_dsb_idx(bp);
3585/* if (status == 0) */
3586/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3587
cdaa7cb8 3588 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3589
877e9aa4 3590 /* HW attentions */
cdaa7cb8 3591 if (status & 0x1) {
a2fbb9ea 3592 bnx2x_attn_int(bp);
3593 status &= ~0x1;
3594 }
3595
3596 /* CStorm events: STAT_QUERY */
3597 if (status & 0x2) {
3598 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3599 status &= ~0x2;
3600 }
3601
3602 if (unlikely(status))
3603 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3604 status);
a2fbb9ea 3605
68d59484 3606 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3607 IGU_INT_NOP, 1);
3608 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3609 IGU_INT_NOP, 1);
3610 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3611 IGU_INT_NOP, 1);
3612 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3613 IGU_INT_NOP, 1);
3614 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3615 IGU_INT_ENABLE, 1);
3616}
3617
3618static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3619{
3620 struct net_device *dev = dev_instance;
3621 struct bnx2x *bp = netdev_priv(dev);
3622
3623 /* Return here if interrupt is disabled */
3624 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3625 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3626 return IRQ_HANDLED;
3627 }
3628
8d9c5f34 3629 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3630
3631#ifdef BNX2X_STOP_ON_ERROR
3632 if (unlikely(bp->panic))
3633 return IRQ_HANDLED;
3634#endif
3635
3636#ifdef BCM_CNIC
3637 {
3638 struct cnic_ops *c_ops;
3639
3640 rcu_read_lock();
3641 c_ops = rcu_dereference(bp->cnic_ops);
3642 if (c_ops)
3643 c_ops->cnic_handler(bp->cnic_data, NULL);
3644 rcu_read_unlock();
3645 }
3646#endif
1cf167f2 3647 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3648
3649 return IRQ_HANDLED;
3650}
3651
3652/* end of slow path */
3653
3654/* Statistics */
3655
3656/****************************************************************************
3657* Macros
3658****************************************************************************/
3659
3660/* sum[hi:lo] += add[hi:lo] */
3661#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3662 do { \
3663 s_lo += a_lo; \
f5ba6772 3664 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3665 } while (0)
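/* Example: ADD_64(s_hi = 0, a_hi = 0, s_lo = 0xffffffff, a_lo = 1) wraps
 * s_lo to 0; the unsigned (s_lo < a_lo) test detects the wrap-around and
 * carries 1 into s_hi, giving the 64-bit sum [0x1:0x00000000].
 */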
3666
3667/* difference = minuend - subtrahend */
3668#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3669 do { \
3670 if (m_lo < s_lo) { \
3671 /* underflow */ \
a2fbb9ea 3672 d_hi = m_hi - s_hi; \
bb2a0f7a 3673 if (d_hi > 0) { \
6378c025 3674 /* we can 'loan' 1 */ \
3675 d_hi--; \
3676 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3677 } else { \
6378c025 3678 /* m_hi <= s_hi */ \
3679 d_hi = 0; \
3680 d_lo = 0; \
3681 } \
3682 } else { \
3683 /* m_lo >= s_lo */ \
a2fbb9ea 3684 if (m_hi < s_hi) { \
3685 d_hi = 0; \
3686 d_lo = 0; \
3687 } else { \
6378c025 3688 /* m_hi >= s_hi */ \
3689 d_hi = m_hi - s_hi; \
3690 d_lo = m_lo - s_lo; \
3691 } \
3692 } \
3693 } while (0)
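/* Note that DIFF_64 clamps the result to zero when the minuend has fallen
 * behind the subtrahend instead of wrapping, so a counter that appears to
 * have gone backwards produces a zero delta: m = [0:5] minus s = [0:9]
 * yields d = [0:0].
 */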
3694
bb2a0f7a 3695#define UPDATE_STAT64(s, t) \
a2fbb9ea 3696 do { \
bb2a0f7a
YG
3697 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3698 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3699 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3700 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3701 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3702 pstats->mac_stx[1].t##_lo, diff.lo); \
3703 } while (0)
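/* mac_stx[0] caches the most recent raw MAC counter snapshot while
 * mac_stx[1] accumulates the deltas between successive snapshots; only
 * the accumulated copy in mac_stx[1] is exported into the driver
 * statistics.
 */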
3704
bb2a0f7a 3705#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3706 do { \
3707 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3708 diff.lo, new->s##_lo, old->s##_lo); \
3709 ADD_64(estats->t##_hi, diff.hi, \
3710 estats->t##_lo, diff.lo); \
3711 } while (0)
3712
3713/* sum[hi:lo] += add */
3714#define ADD_EXTEND_64(s_hi, s_lo, a) \
3715 do { \
3716 s_lo += a; \
3717 s_hi += (s_lo < a) ? 1 : 0; \
3718 } while (0)
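/* ADD_EXTEND_64 folds a plain 32-bit increment into a 64-bit [hi:lo]
 * pair, using the same unsigned wrap-around test as ADD_64 to decide
 * whether to carry into the high word.
 */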
3719
bb2a0f7a 3720#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3721 do { \
3722 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3723 pstats->mac_stx[1].s##_lo, \
3724 new->s); \
3725 } while (0)
3726
bb2a0f7a 3727#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3728 do { \
3729 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3730 old_tclient->s = tclient->s; \
3731 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732 } while (0)
3733
3734#define UPDATE_EXTEND_USTAT(s, t) \
3735 do { \
3736 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3737 old_uclient->s = uclient->s; \
3738 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739 } while (0)
3740
3741#define UPDATE_EXTEND_XSTAT(s, t) \
3742 do { \
3743 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3744 old_xclient->s = xclient->s; \
3745 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746 } while (0)
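/* The UPDATE_EXTEND_{TSTAT,USTAT,XSTAT} family computes the 32-bit delta
 * of a per-client storm counter against the old_{t,u,x}client snapshot
 * kept in the fastpath structure, refreshes that snapshot and extends the
 * delta into the 64-bit qstats field.
 */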
3747
3748/* minuend -= subtrahend */
3749#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3750 do { \
3751 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3752 } while (0)
3753
3754/* minuend[hi:lo] -= subtrahend */
3755#define SUB_EXTEND_64(m_hi, m_lo, s) \
3756 do { \
3757 SUB_64(m_hi, 0, m_lo, s); \
3758 } while (0)
3759
3760#define SUB_EXTEND_USTAT(s, t) \
3761 do { \
3762 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3763 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3764 } while (0)
3765
3766/*
3767 * General service functions
3768 */
3769
3770static inline long bnx2x_hilo(u32 *hiref)
3771{
3772 u32 lo = *(hiref + 1);
3773#if (BITS_PER_LONG == 64)
3774 u32 hi = *hiref;
3775
3776 return HILO_U64(hi, lo);
3777#else
3778 return lo;
3779#endif
3780}
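/* Since struct net_device_stats uses unsigned long fields, a 32-bit
 * kernel can only report the low 32 bits of each 64-bit counter here;
 * 64-bit kernels return the full HILO_U64(hi, lo) value.
 */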
3781
3782/*
3783 * Init service functions
3784 */
3785
3786static void bnx2x_storm_stats_post(struct bnx2x *bp)
3787{
3788 if (!bp->stats_pending) {
3789 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3790 int i, rc;
3791
3792 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3793 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3794 for_each_queue(bp, i)
3795 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3796
3797 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3798 ((u32 *)&ramrod_data)[1],
3799 ((u32 *)&ramrod_data)[0], 0);
3800 if (rc == 0) {
 3801			/* stats ramrod has its own slot on the spq */
3802 bp->spq_left++;
3803 bp->stats_pending = 1;
3804 }
3805 }
3806}
3807
3808static void bnx2x_hw_stats_post(struct bnx2x *bp)
3809{
3810 struct dmae_command *dmae = &bp->stats_dmae;
3811 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3812
3813 *stats_comp = DMAE_COMP_VAL;
3814 if (CHIP_REV_IS_SLOW(bp))
3815 return;
3816
3817 /* loader */
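	/* When several DMAE commands have been queued (executer_idx != 0),
	 * a single "loader" command copies the first queued command from
	 * host memory into the DMAE command memory; its completion address
	 * points at the next channel's GO register (dmae_reg_go_c[]), so
	 * completing the load kicks off execution of the loaded command.
	 */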
3818 if (bp->executer_idx) {
3819 int loader_idx = PMF_DMAE_C(bp);
3820
3821 memset(dmae, 0, sizeof(struct dmae_command));
3822
3823 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3824 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3825 DMAE_CMD_DST_RESET |
3826#ifdef __BIG_ENDIAN
3827 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3828#else
3829 DMAE_CMD_ENDIANITY_DW_SWAP |
3830#endif
3831 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3832 DMAE_CMD_PORT_0) |
3833 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3834 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3835 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3836 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3837 sizeof(struct dmae_command) *
3838 (loader_idx + 1)) >> 2;
3839 dmae->dst_addr_hi = 0;
3840 dmae->len = sizeof(struct dmae_command) >> 2;
3841 if (CHIP_IS_E1(bp))
3842 dmae->len--;
3843 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3844 dmae->comp_addr_hi = 0;
3845 dmae->comp_val = 1;
3846
3847 *stats_comp = 0;
3848 bnx2x_post_dmae(bp, dmae, loader_idx);
3849
3850 } else if (bp->func_stx) {
3851 *stats_comp = 0;
3852 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3853 }
3854}
3855
3856static int bnx2x_stats_comp(struct bnx2x *bp)
3857{
3858 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3859 int cnt = 10;
3860
3861 might_sleep();
3862 while (*stats_comp != DMAE_COMP_VAL) {
3863 if (!cnt) {
 3864			BNX2X_ERR("timeout waiting for stats to finish\n");
3865 break;
3866 }
3867 cnt--;
12469401 3868 msleep(1);
3869 }
3870 return 1;
3871}
3872
3873/*
3874 * Statistics service functions
3875 */
3876
3877static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3878{
3879 struct dmae_command *dmae;
3880 u32 opcode;
3881 int loader_idx = PMF_DMAE_C(bp);
3882 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3883
3884 /* sanity */
3885 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3886 BNX2X_ERR("BUG!\n");
3887 return;
3888 }
3889
3890 bp->executer_idx = 0;
3891
3892 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3893 DMAE_CMD_C_ENABLE |
3894 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3895#ifdef __BIG_ENDIAN
3896 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3897#else
3898 DMAE_CMD_ENDIANITY_DW_SWAP |
3899#endif
3900 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3901 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3902
3903 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3904 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3905 dmae->src_addr_lo = bp->port.port_stx >> 2;
3906 dmae->src_addr_hi = 0;
3907 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3908 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3909 dmae->len = DMAE_LEN32_RD_MAX;
3910 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3911 dmae->comp_addr_hi = 0;
3912 dmae->comp_val = 1;
3913
3914 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3915 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3916 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3917 dmae->src_addr_hi = 0;
3918 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3919 DMAE_LEN32_RD_MAX * 4);
3920 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3921 DMAE_LEN32_RD_MAX * 4);
3922 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3923 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3924 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3925 dmae->comp_val = DMAE_COMP_VAL;
3926
3927 *stats_comp = 0;
3928 bnx2x_hw_stats_post(bp);
3929 bnx2x_stats_comp(bp);
3930}
3931
3932static void bnx2x_port_stats_init(struct bnx2x *bp)
3933{
3934 struct dmae_command *dmae;
34f80b04 3935 int port = BP_PORT(bp);
bb2a0f7a 3936 int vn = BP_E1HVN(bp);
a2fbb9ea 3937 u32 opcode;
bb2a0f7a 3938 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3939 u32 mac_addr;
3940 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3941
3942 /* sanity */
3943 if (!bp->link_vars.link_up || !bp->port.pmf) {
3944 BNX2X_ERR("BUG!\n");
3945 return;
3946 }
3947
3948 bp->executer_idx = 0;
3949
3950 /* MCP */
3951 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3952 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3953 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3954#ifdef __BIG_ENDIAN
bb2a0f7a 3955 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3956#else
bb2a0f7a 3957 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3958#endif
3959 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3960 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3961
bb2a0f7a 3962 if (bp->port.port_stx) {
3963
3964 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3965 dmae->opcode = opcode;
3966 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3967 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3968 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3969 dmae->dst_addr_hi = 0;
3970 dmae->len = sizeof(struct host_port_stats) >> 2;
3971 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3972 dmae->comp_addr_hi = 0;
3973 dmae->comp_val = 1;
3974 }
3975
3976 if (bp->func_stx) {
3977
3978 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979 dmae->opcode = opcode;
3980 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3981 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3982 dmae->dst_addr_lo = bp->func_stx >> 2;
3983 dmae->dst_addr_hi = 0;
3984 dmae->len = sizeof(struct host_func_stats) >> 2;
3985 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986 dmae->comp_addr_hi = 0;
3987 dmae->comp_val = 1;
3988 }
3989
bb2a0f7a 3990 /* MAC */
3991 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3992 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3993 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3994#ifdef __BIG_ENDIAN
3995 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3996#else
3997 DMAE_CMD_ENDIANITY_DW_SWAP |
3998#endif
3999 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4000 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 4001
c18487ee 4002 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4003
4004 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4005 NIG_REG_INGRESS_BMAC0_MEM);
4006
4007 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4008 BIGMAC_REGISTER_TX_STAT_GTBYT */
4009 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4010 dmae->opcode = opcode;
4011 dmae->src_addr_lo = (mac_addr +
4012 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4013 dmae->src_addr_hi = 0;
4014 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4015 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4016 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4017 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4018 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4019 dmae->comp_addr_hi = 0;
4020 dmae->comp_val = 1;
4021
4022 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4023 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4024 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4025 dmae->opcode = opcode;
4026 dmae->src_addr_lo = (mac_addr +
4027 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028 dmae->src_addr_hi = 0;
4029 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4030 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 4031 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4032 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4033 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4034 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4035 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4036 dmae->comp_addr_hi = 0;
4037 dmae->comp_val = 1;
4038
c18487ee 4039 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4040
4041 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4042
4043 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4044 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4045 dmae->opcode = opcode;
4046 dmae->src_addr_lo = (mac_addr +
4047 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4048 dmae->src_addr_hi = 0;
4049 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4050 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4051 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4052 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4053 dmae->comp_addr_hi = 0;
4054 dmae->comp_val = 1;
4055
4056 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4057 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4058 dmae->opcode = opcode;
4059 dmae->src_addr_lo = (mac_addr +
4060 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4061 dmae->src_addr_hi = 0;
4062 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4063 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 4064 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4065 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4066 dmae->len = 1;
4067 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4068 dmae->comp_addr_hi = 0;
4069 dmae->comp_val = 1;
4070
4071 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4072 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4073 dmae->opcode = opcode;
4074 dmae->src_addr_lo = (mac_addr +
4075 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4076 dmae->src_addr_hi = 0;
4077 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4078 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 4079 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4080 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4081 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4082 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4083 dmae->comp_addr_hi = 0;
4084 dmae->comp_val = 1;
4085 }
4086
4087 /* NIG */
4088 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4089 dmae->opcode = opcode;
4090 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4091 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4092 dmae->src_addr_hi = 0;
4093 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4094 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4095 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4096 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097 dmae->comp_addr_hi = 0;
4098 dmae->comp_val = 1;
4099
4100 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101 dmae->opcode = opcode;
4102 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4103 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4104 dmae->src_addr_hi = 0;
4105 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4106 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4107 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4108 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4109 dmae->len = (2*sizeof(u32)) >> 2;
4110 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111 dmae->comp_addr_hi = 0;
4112 dmae->comp_val = 1;
4113
4114 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4116 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4117 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4118#ifdef __BIG_ENDIAN
4119 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4120#else
4121 DMAE_CMD_ENDIANITY_DW_SWAP |
4122#endif
4123 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4124 (vn << DMAE_CMD_E1HVN_SHIFT));
4125 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4126 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 4127 dmae->src_addr_hi = 0;
4128 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4129 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4130 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4131 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4132 dmae->len = (2*sizeof(u32)) >> 2;
4133 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4134 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4135 dmae->comp_val = DMAE_COMP_VAL;
4136
4137 *stats_comp = 0;
4138}
4139
bb2a0f7a 4140static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 4141{
4142 struct dmae_command *dmae = &bp->stats_dmae;
4143 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4144
4145 /* sanity */
4146 if (!bp->func_stx) {
4147 BNX2X_ERR("BUG!\n");
4148 return;
4149 }
a2fbb9ea 4150
4151 bp->executer_idx = 0;
4152 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 4153
4154 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4155 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4156 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4157#ifdef __BIG_ENDIAN
4158 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4159#else
4160 DMAE_CMD_ENDIANITY_DW_SWAP |
4161#endif
4162 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4163 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4164 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4165 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4166 dmae->dst_addr_lo = bp->func_stx >> 2;
4167 dmae->dst_addr_hi = 0;
4168 dmae->len = sizeof(struct host_func_stats) >> 2;
4169 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4170 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4171 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4172
4173 *stats_comp = 0;
4174}
a2fbb9ea 4175
4176static void bnx2x_stats_start(struct bnx2x *bp)
4177{
4178 if (bp->port.pmf)
4179 bnx2x_port_stats_init(bp);
4180
4181 else if (bp->func_stx)
4182 bnx2x_func_stats_init(bp);
4183
4184 bnx2x_hw_stats_post(bp);
4185 bnx2x_storm_stats_post(bp);
4186}
4187
4188static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4189{
4190 bnx2x_stats_comp(bp);
4191 bnx2x_stats_pmf_update(bp);
4192 bnx2x_stats_start(bp);
4193}
4194
4195static void bnx2x_stats_restart(struct bnx2x *bp)
4196{
4197 bnx2x_stats_comp(bp);
4198 bnx2x_stats_start(bp);
4199}
4200
4201static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4202{
4203 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4204 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4205 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4206 struct {
4207 u32 lo;
4208 u32 hi;
4209 } diff;
4210
4211 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4212 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4213 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4214 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4215 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4216 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 4217 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 4218 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 4219 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4220 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4221 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4222 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4223 UPDATE_STAT64(tx_stat_gt127,
4224 tx_stat_etherstatspkts65octetsto127octets);
4225 UPDATE_STAT64(tx_stat_gt255,
4226 tx_stat_etherstatspkts128octetsto255octets);
4227 UPDATE_STAT64(tx_stat_gt511,
4228 tx_stat_etherstatspkts256octetsto511octets);
4229 UPDATE_STAT64(tx_stat_gt1023,
4230 tx_stat_etherstatspkts512octetsto1023octets);
4231 UPDATE_STAT64(tx_stat_gt1518,
4232 tx_stat_etherstatspkts1024octetsto1522octets);
4233 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4234 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4235 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4236 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4237 UPDATE_STAT64(tx_stat_gterr,
4238 tx_stat_dot3statsinternalmactransmiterrors);
4239 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4240
4241 estats->pause_frames_received_hi =
4242 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4243 estats->pause_frames_received_lo =
4244 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4245
4246 estats->pause_frames_sent_hi =
4247 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4248 estats->pause_frames_sent_lo =
4249 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4250}
4251
4252static void bnx2x_emac_stats_update(struct bnx2x *bp)
4253{
4254 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4255 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4256 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4257
4258 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4259 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4260 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4261 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4262 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4263 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4264 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4265 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4266 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4267 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4268 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4269 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4270 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4271 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4272 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4273 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4274 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4275 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4276 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4277 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4278 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4279 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4280 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4281 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4282 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4283 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4284 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4285 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4286 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4287 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4288 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4289
4290 estats->pause_frames_received_hi =
4291 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4292 estats->pause_frames_received_lo =
4293 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4294 ADD_64(estats->pause_frames_received_hi,
4295 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4296 estats->pause_frames_received_lo,
4297 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4298
4299 estats->pause_frames_sent_hi =
4300 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4301 estats->pause_frames_sent_lo =
4302 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4303 ADD_64(estats->pause_frames_sent_hi,
4304 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4305 estats->pause_frames_sent_lo,
4306 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4307}
4308
4309static int bnx2x_hw_stats_update(struct bnx2x *bp)
4310{
4311 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4312 struct nig_stats *old = &(bp->port.old_nig_stats);
4313 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4314 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4315 struct {
4316 u32 lo;
4317 u32 hi;
4318 } diff;
4319
4320 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4321 bnx2x_bmac_stats_update(bp);
4322
4323 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4324 bnx2x_emac_stats_update(bp);
4325
4326 else { /* unreached */
c3eefaf6 4327 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4328 return -1;
4329 }
a2fbb9ea 4330
4331 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4332 new->brb_discard - old->brb_discard);
4333 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4334 new->brb_truncate - old->brb_truncate);
a2fbb9ea 4335
4336 UPDATE_STAT64_NIG(egress_mac_pkt0,
4337 etherstatspkts1024octetsto1522octets);
4338 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 4339
bb2a0f7a 4340 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 4341
4342 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4343 sizeof(struct mac_stx));
4344 estats->brb_drop_hi = pstats->brb_drop_hi;
4345 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 4346
bb2a0f7a 4347 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 4348
4349 if (!BP_NOMCP(bp)) {
4350 u32 nig_timer_max =
4351 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4352 if (nig_timer_max != estats->nig_timer_max) {
4353 estats->nig_timer_max = nig_timer_max;
4354 BNX2X_ERR("NIG timer max (%u)\n",
4355 estats->nig_timer_max);
4356 }
4357 }
4358
bb2a0f7a 4359 return 0;
4360}
4361
bb2a0f7a 4362static int bnx2x_storm_stats_update(struct bnx2x *bp)
4363{
4364 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4365 struct tstorm_per_port_stats *tport =
de832a55 4366 &stats->tstorm_common.port_statistics;
4367 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4368 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4369 int i;
4370
4371 memcpy(&(fstats->total_bytes_received_hi),
4372 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4373 sizeof(struct host_func_stats) - 2*sizeof(u32));
4374 estats->error_bytes_received_hi = 0;
4375 estats->error_bytes_received_lo = 0;
4376 estats->etherstatsoverrsizepkts_hi = 0;
4377 estats->etherstatsoverrsizepkts_lo = 0;
4378 estats->no_buff_discard_hi = 0;
4379 estats->no_buff_discard_lo = 0;
a2fbb9ea 4380
54b9ddaa 4381 for_each_queue(bp, i) {
4382 struct bnx2x_fastpath *fp = &bp->fp[i];
4383 int cl_id = fp->cl_id;
4384 struct tstorm_per_client_stats *tclient =
4385 &stats->tstorm_common.client_statistics[cl_id];
4386 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4387 struct ustorm_per_client_stats *uclient =
4388 &stats->ustorm_common.client_statistics[cl_id];
4389 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4390 struct xstorm_per_client_stats *xclient =
4391 &stats->xstorm_common.client_statistics[cl_id];
4392 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4393 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4394 u32 diff;
4395
4396 /* are storm stats valid? */
4397 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4398 bp->stats_counter) {
de832a55 4399 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
cdaa7cb8 4400 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4401 i, xclient->stats_counter, bp->stats_counter);
4402 return -1;
4403 }
4404 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4405 bp->stats_counter) {
de832a55 4406 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
cdaa7cb8 4407 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4408 i, tclient->stats_counter, bp->stats_counter);
4409 return -2;
4410 }
4411 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4412 bp->stats_counter) {
4413 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
cdaa7cb8 4414 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4415 i, uclient->stats_counter, bp->stats_counter);
4416 return -4;
4417 }
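		/* The storms echo the drv_counter posted with the last
		 * STAT_QUERY ramrod, so a client's counters are trusted only
		 * when the echoed value + 1 equals bp->stats_counter, i.e.
		 * the storm has processed the most recent query.
		 */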
a2fbb9ea 4418
de832a55 4419 qstats->total_bytes_received_hi =
ca00392c 4420 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4421 qstats->total_bytes_received_lo =
4422 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4423
4424 ADD_64(qstats->total_bytes_received_hi,
4425 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4426 qstats->total_bytes_received_lo,
4427 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4428
4429 ADD_64(qstats->total_bytes_received_hi,
4430 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4431 qstats->total_bytes_received_lo,
4432 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4433
4434 SUB_64(qstats->total_bytes_received_hi,
4435 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4436 qstats->total_bytes_received_lo,
4437 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4438
4439 SUB_64(qstats->total_bytes_received_hi,
4440 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4441 qstats->total_bytes_received_lo,
4442 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4443
4444 SUB_64(qstats->total_bytes_received_hi,
4445 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4446 qstats->total_bytes_received_lo,
4447 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4448
4449 qstats->valid_bytes_received_hi =
4450 qstats->total_bytes_received_hi;
de832a55 4451 qstats->valid_bytes_received_lo =
ca00392c 4452 qstats->total_bytes_received_lo;
bb2a0f7a 4453
de832a55 4454 qstats->error_bytes_received_hi =
bb2a0f7a 4455 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4456 qstats->error_bytes_received_lo =
bb2a0f7a 4457 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4458
4459 ADD_64(qstats->total_bytes_received_hi,
4460 qstats->error_bytes_received_hi,
4461 qstats->total_bytes_received_lo,
4462 qstats->error_bytes_received_lo);
4463
4464 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4465 total_unicast_packets_received);
4466 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4467 total_multicast_packets_received);
4468 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4469 total_broadcast_packets_received);
4470 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4471 etherstatsoverrsizepkts);
4472 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4473
4474 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4475 total_unicast_packets_received);
4476 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4477 total_multicast_packets_received);
4478 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4479 total_broadcast_packets_received);
4480 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4481 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4482 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4483
4484 qstats->total_bytes_transmitted_hi =
ca00392c 4485 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4486 qstats->total_bytes_transmitted_lo =
4487 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4488
4489 ADD_64(qstats->total_bytes_transmitted_hi,
4490 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4491 qstats->total_bytes_transmitted_lo,
4492 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4493
4494 ADD_64(qstats->total_bytes_transmitted_hi,
4495 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4496 qstats->total_bytes_transmitted_lo,
4497 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4498
4499 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4500 total_unicast_packets_transmitted);
4501 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4502 total_multicast_packets_transmitted);
4503 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4504 total_broadcast_packets_transmitted);
4505
4506 old_tclient->checksum_discard = tclient->checksum_discard;
4507 old_tclient->ttl0_discard = tclient->ttl0_discard;
4508
4509 ADD_64(fstats->total_bytes_received_hi,
4510 qstats->total_bytes_received_hi,
4511 fstats->total_bytes_received_lo,
4512 qstats->total_bytes_received_lo);
4513 ADD_64(fstats->total_bytes_transmitted_hi,
4514 qstats->total_bytes_transmitted_hi,
4515 fstats->total_bytes_transmitted_lo,
4516 qstats->total_bytes_transmitted_lo);
4517 ADD_64(fstats->total_unicast_packets_received_hi,
4518 qstats->total_unicast_packets_received_hi,
4519 fstats->total_unicast_packets_received_lo,
4520 qstats->total_unicast_packets_received_lo);
4521 ADD_64(fstats->total_multicast_packets_received_hi,
4522 qstats->total_multicast_packets_received_hi,
4523 fstats->total_multicast_packets_received_lo,
4524 qstats->total_multicast_packets_received_lo);
4525 ADD_64(fstats->total_broadcast_packets_received_hi,
4526 qstats->total_broadcast_packets_received_hi,
4527 fstats->total_broadcast_packets_received_lo,
4528 qstats->total_broadcast_packets_received_lo);
4529 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4530 qstats->total_unicast_packets_transmitted_hi,
4531 fstats->total_unicast_packets_transmitted_lo,
4532 qstats->total_unicast_packets_transmitted_lo);
4533 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4534 qstats->total_multicast_packets_transmitted_hi,
4535 fstats->total_multicast_packets_transmitted_lo,
4536 qstats->total_multicast_packets_transmitted_lo);
4537 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4538 qstats->total_broadcast_packets_transmitted_hi,
4539 fstats->total_broadcast_packets_transmitted_lo,
4540 qstats->total_broadcast_packets_transmitted_lo);
4541 ADD_64(fstats->valid_bytes_received_hi,
4542 qstats->valid_bytes_received_hi,
4543 fstats->valid_bytes_received_lo,
4544 qstats->valid_bytes_received_lo);
4545
4546 ADD_64(estats->error_bytes_received_hi,
4547 qstats->error_bytes_received_hi,
4548 estats->error_bytes_received_lo,
4549 qstats->error_bytes_received_lo);
4550 ADD_64(estats->etherstatsoverrsizepkts_hi,
4551 qstats->etherstatsoverrsizepkts_hi,
4552 estats->etherstatsoverrsizepkts_lo,
4553 qstats->etherstatsoverrsizepkts_lo);
4554 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4555 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4556 }
4557
4558 ADD_64(fstats->total_bytes_received_hi,
4559 estats->rx_stat_ifhcinbadoctets_hi,
4560 fstats->total_bytes_received_lo,
4561 estats->rx_stat_ifhcinbadoctets_lo);
4562
4563 memcpy(estats, &(fstats->total_bytes_received_hi),
4564 sizeof(struct host_func_stats) - 2*sizeof(u32));
4565
4566 ADD_64(estats->etherstatsoverrsizepkts_hi,
4567 estats->rx_stat_dot3statsframestoolong_hi,
4568 estats->etherstatsoverrsizepkts_lo,
4569 estats->rx_stat_dot3statsframestoolong_lo);
4570 ADD_64(estats->error_bytes_received_hi,
4571 estats->rx_stat_ifhcinbadoctets_hi,
4572 estats->error_bytes_received_lo,
4573 estats->rx_stat_ifhcinbadoctets_lo);
4574
4575 if (bp->port.pmf) {
4576 estats->mac_filter_discard =
4577 le32_to_cpu(tport->mac_filter_discard);
4578 estats->xxoverflow_discard =
4579 le32_to_cpu(tport->xxoverflow_discard);
4580 estats->brb_truncate_discard =
bb2a0f7a 4581 le32_to_cpu(tport->brb_truncate_discard);
4582 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4583 }
4584
4585 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4586
4587 bp->stats_pending = 0;
4588
4589 return 0;
4590}
4591
bb2a0f7a 4592static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4593{
bb2a0f7a 4594 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4595 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4596 int i;
4597
4598 nstats->rx_packets =
4599 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4600 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4601 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4602
4603 nstats->tx_packets =
4604 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4605 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4606 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4607
de832a55 4608 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4609
0e39e645 4610 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4611
de832a55 4612 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4613 for_each_queue(bp, i)
4614 nstats->rx_dropped +=
4615 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4616
4617 nstats->tx_dropped = 0;
4618
4619 nstats->multicast =
de832a55 4620 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4621
bb2a0f7a 4622 nstats->collisions =
de832a55 4623 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4624
4625 nstats->rx_length_errors =
4626 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4627 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4628 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4629 bnx2x_hilo(&estats->brb_truncate_hi);
4630 nstats->rx_crc_errors =
4631 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4632 nstats->rx_frame_errors =
4633 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4634 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4635 nstats->rx_missed_errors = estats->xxoverflow_discard;
4636
4637 nstats->rx_errors = nstats->rx_length_errors +
4638 nstats->rx_over_errors +
4639 nstats->rx_crc_errors +
4640 nstats->rx_frame_errors +
4641 nstats->rx_fifo_errors +
4642 nstats->rx_missed_errors;
a2fbb9ea 4643
bb2a0f7a 4644 nstats->tx_aborted_errors =
4645 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4646 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4647 nstats->tx_carrier_errors =
4648 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4649 nstats->tx_fifo_errors = 0;
4650 nstats->tx_heartbeat_errors = 0;
4651 nstats->tx_window_errors = 0;
4652
4653 nstats->tx_errors = nstats->tx_aborted_errors +
4654 nstats->tx_carrier_errors +
4655 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4656}
4657
4658static void bnx2x_drv_stats_update(struct bnx2x *bp)
4659{
4660 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4661 int i;
4662
4663 estats->driver_xoff = 0;
4664 estats->rx_err_discard_pkt = 0;
4665 estats->rx_skb_alloc_failed = 0;
4666 estats->hw_csum_err = 0;
54b9ddaa 4667 for_each_queue(bp, i) {
4668 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4669
4670 estats->driver_xoff += qstats->driver_xoff;
4671 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4672 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4673 estats->hw_csum_err += qstats->hw_csum_err;
4674 }
4675}
4676
bb2a0f7a 4677static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4678{
bb2a0f7a 4679 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4680
4681 if (*stats_comp != DMAE_COMP_VAL)
4682 return;
4683
4684 if (bp->port.pmf)
de832a55 4685 bnx2x_hw_stats_update(bp);
a2fbb9ea 4686
4687 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
 4688		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4689 bnx2x_panic();
4690 return;
4691 }
4692
4693 bnx2x_net_stats_update(bp);
4694 bnx2x_drv_stats_update(bp);
4695
7995c64e 4696 if (netif_msg_timer(bp)) {
bb2a0f7a 4697 struct bnx2x_eth_stats *estats = &bp->eth_stats;
34f80b04 4698 int i;
a2fbb9ea 4699
4700 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4701 bp->dev->name,
de832a55 4702 estats->brb_drop_lo, estats->brb_truncate_lo);
4703
4704 for_each_queue(bp, i) {
4705 struct bnx2x_fastpath *fp = &bp->fp[i];
4706 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4707
4708 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4709 " rx pkt(%lu) rx calls(%lu %lu)\n",
4710 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4711 fp->rx_comp_cons),
4712 le16_to_cpu(*fp->rx_cons_sb),
4713 bnx2x_hilo(&qstats->
4714 total_unicast_packets_received_hi),
4715 fp->rx_calls, fp->rx_pkt);
4716 }
4717
4718 for_each_queue(bp, i) {
4719 struct bnx2x_fastpath *fp = &bp->fp[i];
4720 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4721 struct netdev_queue *txq =
4722 netdev_get_tx_queue(bp->dev, i);
4723
4724 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4725 " tx pkt(%lu) tx calls (%lu)"
4726 " %s (Xoff events %u)\n",
4727 fp->name, bnx2x_tx_avail(fp),
4728 le16_to_cpu(*fp->tx_cons_sb),
4729 bnx2x_hilo(&qstats->
4730 total_unicast_packets_transmitted_hi),
4731 fp->tx_pkt,
4732 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4733 qstats->driver_xoff);
4734 }
4735 }
4736
4737 bnx2x_hw_stats_post(bp);
4738 bnx2x_storm_stats_post(bp);
4739}
a2fbb9ea 4740
4741static void bnx2x_port_stats_stop(struct bnx2x *bp)
4742{
4743 struct dmae_command *dmae;
4744 u32 opcode;
4745 int loader_idx = PMF_DMAE_C(bp);
4746 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4747
bb2a0f7a 4748 bp->executer_idx = 0;
a2fbb9ea 4749
4750 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4751 DMAE_CMD_C_ENABLE |
4752 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4753#ifdef __BIG_ENDIAN
bb2a0f7a 4754 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4755#else
bb2a0f7a 4756 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4757#endif
4758 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4759 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4760
4761 if (bp->port.port_stx) {
4762
4763 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4764 if (bp->func_stx)
4765 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4766 else
4767 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4768 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4769 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4770 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4771 dmae->dst_addr_hi = 0;
4772 dmae->len = sizeof(struct host_port_stats) >> 2;
4773 if (bp->func_stx) {
4774 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4775 dmae->comp_addr_hi = 0;
4776 dmae->comp_val = 1;
4777 } else {
4778 dmae->comp_addr_lo =
4779 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4780 dmae->comp_addr_hi =
4781 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4782 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4783
4784 *stats_comp = 0;
4785 }
4786 }
4787
4788 if (bp->func_stx) {
4789
4790 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4791 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4792 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4793 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4794 dmae->dst_addr_lo = bp->func_stx >> 2;
4795 dmae->dst_addr_hi = 0;
4796 dmae->len = sizeof(struct host_func_stats) >> 2;
4797 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4798 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4799 dmae->comp_val = DMAE_COMP_VAL;
4800
4801 *stats_comp = 0;
a2fbb9ea 4802 }
4803}
4804
4805static void bnx2x_stats_stop(struct bnx2x *bp)
4806{
4807 int update = 0;
4808
4809 bnx2x_stats_comp(bp);
4810
4811 if (bp->port.pmf)
4812 update = (bnx2x_hw_stats_update(bp) == 0);
4813
4814 update |= (bnx2x_storm_stats_update(bp) == 0);
4815
4816 if (update) {
4817 bnx2x_net_stats_update(bp);
a2fbb9ea 4818
4819 if (bp->port.pmf)
4820 bnx2x_port_stats_stop(bp);
4821
4822 bnx2x_hw_stats_post(bp);
4823 bnx2x_stats_comp(bp);
4824 }
4825}
4826
4827static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4828{
4829}
4830
4831static const struct {
4832 void (*action)(struct bnx2x *bp);
4833 enum bnx2x_stats_state next_state;
4834} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4835/* state event */
4836{
4837/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4838/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4839/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4840/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4841},
4842{
4843/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4844/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4845/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4846/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4847}
4848};
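/* E.g. a LINK_UP event in STATS_STATE_DISABLED runs bnx2x_stats_start()
 * and moves the machine to STATS_STATE_ENABLED; UPDATE and STOP events
 * are ignored while statistics are disabled.
 */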
4849
4850static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4851{
4852 enum bnx2x_stats_state state = bp->stats_state;
4853
4854 if (unlikely(bp->panic))
4855 return;
4856
4857 bnx2x_stats_stm[state][event].action(bp);
4858 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4859
4860 /* Make sure the state has been "changed" */
4861 smp_wmb();
4862
7995c64e 4863 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4864 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4865 state, event, bp->stats_state);
4866}
4867
4868static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4869{
4870 struct dmae_command *dmae;
4871 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4872
4873 /* sanity */
4874 if (!bp->port.pmf || !bp->port.port_stx) {
4875 BNX2X_ERR("BUG!\n");
4876 return;
4877 }
4878
4879 bp->executer_idx = 0;
4880
4881 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4882 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4883 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4884 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4885#ifdef __BIG_ENDIAN
4886 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4887#else
4888 DMAE_CMD_ENDIANITY_DW_SWAP |
4889#endif
4890 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4891 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4892 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4893 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4894 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4895 dmae->dst_addr_hi = 0;
4896 dmae->len = sizeof(struct host_port_stats) >> 2;
4897 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4898 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4899 dmae->comp_val = DMAE_COMP_VAL;
4900
4901 *stats_comp = 0;
4902 bnx2x_hw_stats_post(bp);
4903 bnx2x_stats_comp(bp);
4904}
4905
4906static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4907{
4908 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4909 int port = BP_PORT(bp);
4910 int func;
4911 u32 func_stx;
4912
4913 /* sanity */
4914 if (!bp->port.pmf || !bp->func_stx) {
4915 BNX2X_ERR("BUG!\n");
4916 return;
4917 }
4918
4919 /* save our func_stx */
4920 func_stx = bp->func_stx;
4921
4922 for (vn = VN_0; vn < vn_max; vn++) {
4923 func = 2*vn + port;
4924
4925 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4926 bnx2x_func_stats_init(bp);
4927 bnx2x_hw_stats_post(bp);
4928 bnx2x_stats_comp(bp);
4929 }
4930
4931 /* restore our func_stx */
4932 bp->func_stx = func_stx;
4933}
4934
4935static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4936{
4937 struct dmae_command *dmae = &bp->stats_dmae;
4938 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4939
4940 /* sanity */
4941 if (!bp->func_stx) {
4942 BNX2X_ERR("BUG!\n");
4943 return;
4944 }
4945
4946 bp->executer_idx = 0;
4947 memset(dmae, 0, sizeof(struct dmae_command));
4948
4949 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4950 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4951 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4952#ifdef __BIG_ENDIAN
4953 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4954#else
4955 DMAE_CMD_ENDIANITY_DW_SWAP |
4956#endif
4957 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4958 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4959 dmae->src_addr_lo = bp->func_stx >> 2;
4960 dmae->src_addr_hi = 0;
4961 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4962 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4963 dmae->len = sizeof(struct host_func_stats) >> 2;
4964 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4965 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4966 dmae->comp_val = DMAE_COMP_VAL;
4967
4968 *stats_comp = 0;
4969 bnx2x_hw_stats_post(bp);
4970 bnx2x_stats_comp(bp);
4971}
4972
4973static void bnx2x_stats_init(struct bnx2x *bp)
4974{
4975 int port = BP_PORT(bp);
4976 int func = BP_FUNC(bp);
4977 int i;
4978
4979 bp->stats_pending = 0;
4980 bp->executer_idx = 0;
4981 bp->stats_counter = 0;
4982
4983 /* port and func stats for management */
4984 if (!BP_NOMCP(bp)) {
4985 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4986 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4987
4988 } else {
4989 bp->port.port_stx = 0;
4990 bp->func_stx = 0;
4991 }
4992 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4993 bp->port.port_stx, bp->func_stx);
4994
4995 /* port stats */
4996 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4997 bp->port.old_nig_stats.brb_discard =
4998 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4999 bp->port.old_nig_stats.brb_truncate =
5000 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5001 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5002 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5003 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5004 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5005
5006 /* function stats */
5007 for_each_queue(bp, i) {
5008 struct bnx2x_fastpath *fp = &bp->fp[i];
5009
5010 memset(&fp->old_tclient, 0,
5011 sizeof(struct tstorm_per_client_stats));
5012 memset(&fp->old_uclient, 0,
5013 sizeof(struct ustorm_per_client_stats));
5014 memset(&fp->old_xclient, 0,
5015 sizeof(struct xstorm_per_client_stats));
5016 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5017 }
5018
5019 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5020 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5021
5022 bp->stats_state = STATS_STATE_DISABLED;
5023
5024 if (bp->port.pmf) {
5025 if (bp->port.port_stx)
5026 bnx2x_port_stats_base_init(bp);
5027
5028 if (bp->func_stx)
5029 bnx2x_func_stats_base_init(bp);
5030
5031 } else if (bp->func_stx)
5032 bnx2x_func_stats_base_update(bp);
5033}
5034
5035static void bnx2x_timer(unsigned long data)
5036{
5037 struct bnx2x *bp = (struct bnx2x *) data;
5038
5039 if (!netif_running(bp->dev))
5040 return;
5041
5042 if (atomic_read(&bp->intr_sem) != 0)
f1410647 5043 goto timer_restart;
5044
5045 if (poll) {
5046 struct bnx2x_fastpath *fp = &bp->fp[0];
5047 int rc;
5048
7961f791 5049 bnx2x_tx_int(fp);
5050 rc = bnx2x_rx_int(fp, 1000);
5051 }
5052
5053 if (!BP_NOMCP(bp)) {
5054 int func = BP_FUNC(bp);
5055 u32 drv_pulse;
5056 u32 mcp_pulse;
5057
5058 ++bp->fw_drv_pulse_wr_seq;
5059 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5060 /* TBD - add SYSTEM_TIME */
5061 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 5062 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 5063
34f80b04 5064 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5065 MCP_PULSE_SEQ_MASK);
5066 /* The delta between driver pulse and mcp response
5067 * should be 1 (before mcp response) or 0 (after mcp response)
5068 */
5069 if ((drv_pulse != mcp_pulse) &&
5070 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5071 /* someone lost a heartbeat... */
5072 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5073 drv_pulse, mcp_pulse);
5074 }
5075 }
5076
f34d28ea 5077 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5078 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5079
f1410647 5080timer_restart:
5081 mod_timer(&bp->timer, jiffies + bp->current_interval);
5082}
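
/* Illustrative sketch (standalone, not driver code): the pulse comparison
 * in the timer above accepts exactly two states - driver and MCP sequence
 * numbers equal (MCP already answered) or the driver one ahead modulo the
 * sequence mask (answer still pending). SK_PULSE_SEQ_MASK is an assumed
 * stand-in for MCP_PULSE_SEQ_MASK.
 */
#include <stdbool.h>
#include <stdint.h>

#define SK_PULSE_SEQ_MASK 0x7fff        /* assumed width, illustration only */

static bool sk_pulse_in_sync(uint16_t drv_pulse, uint16_t mcp_pulse)
{
        return (drv_pulse == mcp_pulse) ||
               (drv_pulse == ((mcp_pulse + 1) & SK_PULSE_SEQ_MASK));
}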
5083
5084/* end of Statistics */
5085
5086/* nic init */
5087
5088/*
5089 * nic init service functions
5090 */
5091
34f80b04 5092static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 5093{
5094 int port = BP_PORT(bp);
5095
5096 /* "CSTORM" */
5097 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5098 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5099 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5100 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5101 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5102 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5103}
5104
5105static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5106 dma_addr_t mapping, int sb_id)
5107{
5108 int port = BP_PORT(bp);
bb2a0f7a 5109 int func = BP_FUNC(bp);
a2fbb9ea 5110 int index;
34f80b04 5111 u64 section;
5112
5113 /* USTORM */
5114 section = ((u64)mapping) + offsetof(struct host_status_block,
5115 u_status_block);
34f80b04 5116 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 5117
5118 REG_WR(bp, BAR_CSTRORM_INTMEM +
5119 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5120 REG_WR(bp, BAR_CSTRORM_INTMEM +
5121 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5122 U64_HI(section));
5123 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5124 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5125
5126 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5127 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5128 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5129
5130 /* CSTORM */
5131 section = ((u64)mapping) + offsetof(struct host_status_block,
5132 c_status_block);
34f80b04 5133 sb->c_status_block.status_block_id = sb_id;
5134
5135 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5136 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 5137 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5138 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5139 U64_HI(section));
7a9b2557 5140 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 5141 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5142
5143 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5144 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5145 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5146
5147 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5148}
5149
5150static void bnx2x_zero_def_sb(struct bnx2x *bp)
5151{
5152 int func = BP_FUNC(bp);
a2fbb9ea 5153
ca00392c 5154 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5155 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5156 sizeof(struct tstorm_def_status_block)/4);
5157 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5158 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5159 sizeof(struct cstorm_def_status_block_u)/4);
5160 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5161 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5162 sizeof(struct cstorm_def_status_block_c)/4);
5163 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5164 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5165 sizeof(struct xstorm_def_status_block)/4);
5166}
5167
5168static void bnx2x_init_def_sb(struct bnx2x *bp,
5169 struct host_def_status_block *def_sb,
34f80b04 5170 dma_addr_t mapping, int sb_id)
a2fbb9ea 5171{
5172 int port = BP_PORT(bp);
5173 int func = BP_FUNC(bp);
5174 int index, val, reg_offset;
5175 u64 section;
5176
5177 /* ATTN */
5178 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5179 atten_status_block);
34f80b04 5180 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 5181
5182 bp->attn_state = 0;
5183
5184 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5185 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5186
34f80b04 5187 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5188 bp->attn_group[index].sig[0] = REG_RD(bp,
5189 reg_offset + 0x10*index);
5190 bp->attn_group[index].sig[1] = REG_RD(bp,
5191 reg_offset + 0x4 + 0x10*index);
5192 bp->attn_group[index].sig[2] = REG_RD(bp,
5193 reg_offset + 0x8 + 0x10*index);
5194 bp->attn_group[index].sig[3] = REG_RD(bp,
5195 reg_offset + 0xc + 0x10*index);
5196 }
5197
5198 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5199 HC_REG_ATTN_MSG0_ADDR_L);
5200
5201 REG_WR(bp, reg_offset, U64_LO(section));
5202 REG_WR(bp, reg_offset + 4, U64_HI(section));
5203
5204 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5205
5206 val = REG_RD(bp, reg_offset);
34f80b04 5207 val |= sb_id;
5208 REG_WR(bp, reg_offset, val);
5209
5210 /* USTORM */
5211 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5212 u_def_status_block);
34f80b04 5213 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 5214
5215 REG_WR(bp, BAR_CSTRORM_INTMEM +
5216 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5217 REG_WR(bp, BAR_CSTRORM_INTMEM +
5218 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 5219 U64_HI(section));
5220 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5221 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5222
5223 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5224 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5225 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5226
5227 /* CSTORM */
5228 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5229 c_def_status_block);
34f80b04 5230 def_sb->c_def_status_block.status_block_id = sb_id;
5231
5232 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5233 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 5234 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5235 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 5236 U64_HI(section));
5c862848 5237 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 5238 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5239
5240 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5241 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5242 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5243
5244 /* TSTORM */
5245 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5246 t_def_status_block);
34f80b04 5247 def_sb->t_def_status_block.status_block_id = sb_id;
5248
5249 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5250 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5251 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5252 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5253 U64_HI(section));
5c862848 5254 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 5255 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5256
5257 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5258 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 5259 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5260
5261 /* XSTORM */
5262 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5263 x_def_status_block);
34f80b04 5264 def_sb->x_def_status_block.status_block_id = sb_id;
5265
5266 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5267 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5268 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5269 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5270 U64_HI(section));
5c862848 5271 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 5272 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5273
5274 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5275 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 5276 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 5277
bb2a0f7a 5278 bp->stats_pending = 0;
66e855f3 5279 bp->set_mac_pending = 0;
bb2a0f7a 5280
34f80b04 5281 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5282}
5283
5284static void bnx2x_update_coalesce(struct bnx2x *bp)
5285{
34f80b04 5286 int port = BP_PORT(bp);
5287 int i;
5288
5289 for_each_queue(bp, i) {
34f80b04 5290 int sb_id = bp->fp[i].sb_id;
5291
5292 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5293 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5294 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5295 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5296 bp->rx_ticks/(4 * BNX2X_BTR));
5297 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5298 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5299 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5300 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5301
5302 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5303 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5304 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5305 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5306 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 5307 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5308 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5309 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5310 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5311 }
5312}
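
/* Illustrative sketch (standalone, not driver code): the writes above
 * encode one rule - a status-block index gets its timeout programmed in
 * units of 4*BNX2X_BTR, and if the configured tick value rounds down to
 * zero the index is disabled rather than given a zero timeout. SK_BTR is
 * an assumed stand-in; the real unit comes from the BNX2X_BTR definition.
 */
#define SK_BTR 4        /* assumed base timer resolution, illustration only */

struct sk_hc_cfg {
        unsigned char timeout;  /* written via REG_WR8 above */
        unsigned short disable; /* written via REG_WR16 above */
};

static struct sk_hc_cfg sk_encode_coalesce(unsigned int ticks)
{
        struct sk_hc_cfg cfg;

        cfg.timeout = ticks / (4 * SK_BTR);
        cfg.disable = cfg.timeout ? 0 : 1;
        return cfg;
}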
5313
5314static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5315 struct bnx2x_fastpath *fp, int last)
5316{
5317 int i;
5318
5319 for (i = 0; i < last; i++) {
5320 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5321 struct sk_buff *skb = rx_buf->skb;
5322
5323 if (skb == NULL) {
5324 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5325 continue;
5326 }
5327
5328 if (fp->tpa_state[i] == BNX2X_TPA_START)
5329 dma_unmap_single(&bp->pdev->dev,
5330 dma_unmap_addr(rx_buf, mapping),
5331 bp->rx_buf_size, DMA_FROM_DEVICE);
5332
5333 dev_kfree_skb(skb);
5334 rx_buf->skb = NULL;
5335 }
5336}
5337
5338static void bnx2x_init_rx_rings(struct bnx2x *bp)
5339{
7a9b2557 5340 int func = BP_FUNC(bp);
5341 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5342 ETH_MAX_AGGREGATION_QUEUES_E1H;
5343 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 5344 int i, j;
a2fbb9ea 5345
87942b46 5346 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5347 DP(NETIF_MSG_IFUP,
5348 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 5349
7a9b2557 5350 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 5351
54b9ddaa 5352 for_each_queue(bp, j) {
32626230 5353 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 5354
32626230 5355 for (i = 0; i < max_agg_queues; i++) {
5356 fp->tpa_pool[i].skb =
5357 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5358 if (!fp->tpa_pool[i].skb) {
5359 BNX2X_ERR("Failed to allocate TPA "
5360 "skb pool for queue[%d] - "
5361 "disabling TPA on this "
5362 "queue!\n", j);
5363 bnx2x_free_tpa_pool(bp, fp, i);
5364 fp->disable_tpa = 1;
5365 break;
5366 }
1a983142 5367 dma_unmap_addr_set((struct sw_rx_bd *)
5368 &bp->fp->tpa_pool[i],
5369 mapping, 0);
5370 fp->tpa_state[i] = BNX2X_TPA_STOP;
5371 }
5372 }
5373 }
5374
54b9ddaa 5375 for_each_queue(bp, j) {
5376 struct bnx2x_fastpath *fp = &bp->fp[j];
5377
5378 fp->rx_bd_cons = 0;
5379 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5380 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5381
5382 /* "next page" elements initialization */
5383 /* SGE ring */
5384 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5385 struct eth_rx_sge *sge;
5386
5387 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5388 sge->addr_hi =
5389 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5390 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5391 sge->addr_lo =
5392 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5393 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5394 }
5395
5396 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5397
7a9b2557 5398 /* RX BD ring */
5399 for (i = 1; i <= NUM_RX_RINGS; i++) {
5400 struct eth_rx_bd *rx_bd;
5401
5402 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5403 rx_bd->addr_hi =
5404 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5405 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5406 rx_bd->addr_lo =
5407 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5408 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5409 }
5410
34f80b04 5411 /* CQ ring */
5412 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5413 struct eth_rx_cqe_next_page *nextpg;
5414
5415 nextpg = (struct eth_rx_cqe_next_page *)
5416 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5417 nextpg->addr_hi =
5418 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5419 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5420 nextpg->addr_lo =
5421 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5422 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5423 }
5424
5425 /* Allocate SGEs and initialize the ring elements */
5426 for (i = 0, ring_prod = 0;
5427 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5428
5429 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5430 BNX2X_ERR("was only able to allocate "
5431 "%d rx sges\n", i);
5432 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5433 /* Cleanup already allocated elements */
5434 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5435 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5436 fp->disable_tpa = 1;
5437 ring_prod = 0;
5438 break;
5439 }
5440 ring_prod = NEXT_SGE_IDX(ring_prod);
5441 }
5442 fp->rx_sge_prod = ring_prod;
5443
5444 /* Allocate BDs and initialize BD ring */
66e855f3 5445 fp->rx_comp_cons = 0;
7a9b2557 5446 cqe_ring_prod = ring_prod = 0;
5447 for (i = 0; i < bp->rx_ring_size; i++) {
5448 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5449 BNX2X_ERR("was only able to allocate "
5450 "%d rx skbs on queue[%d]\n", i, j);
5451 fp->eth_q_stats.rx_skb_alloc_failed++;
5452 break;
5453 }
5454 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5455 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5456 WARN_ON(ring_prod <= i);
5457 }
5458
5459 fp->rx_bd_prod = ring_prod;
5460 /* must not have more available CQEs than BDs */
5461 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5462 cqe_ring_prod);
5463 fp->rx_pkt = fp->rx_calls = 0;
5464
5465 /* Warning!
5466 * this will generate an interrupt (to the TSTORM);
5467 * it must only be done after the chip is initialized
5468 */
5469 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5470 fp->rx_sge_prod);
5471 if (j != 0)
5472 continue;
5473
5474 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5475 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5476 U64_LO(fp->rx_comp_mapping));
5477 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5478 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5479 U64_HI(fp->rx_comp_mapping));
5480 }
5481}
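
/* Illustrative sketch (standalone, not driver code): the "next page" loops
 * above write the chaining element near the end of every ring page, which
 * is why the producer arithmetic must skip reserved slots when crossing a
 * page boundary. With SK_DESC_CNT descriptors per page and the last two
 * reserved for the next-page pointer (counts are assumed here):
 */
#define SK_DESC_CNT     512     /* descriptors per page, assumed */
#define SK_USABLE_CNT   (SK_DESC_CNT - 2)

static unsigned short sk_next_rx_idx(unsigned short idx)
{
        /* last usable BD of a page: hop over the two reserved slots */
        if ((idx % SK_DESC_CNT) == SK_USABLE_CNT - 1)
                return idx + 3;
        return idx + 1;
}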
5482
5483static void bnx2x_init_tx_ring(struct bnx2x *bp)
5484{
5485 int i, j;
5486
54b9ddaa 5487 for_each_queue(bp, j) {
5488 struct bnx2x_fastpath *fp = &bp->fp[j];
5489
5490 for (i = 1; i <= NUM_TX_RINGS; i++) {
5491 struct eth_tx_next_bd *tx_next_bd =
5492 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5493
ca00392c 5494 tx_next_bd->addr_hi =
a2fbb9ea 5495 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5496 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5497 tx_next_bd->addr_lo =
a2fbb9ea 5498 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5499 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5500 }
5501
5502 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5503 fp->tx_db.data.zero_fill1 = 0;
5504 fp->tx_db.data.prod = 0;
5505
5506 fp->tx_pkt_prod = 0;
5507 fp->tx_pkt_cons = 0;
5508 fp->tx_bd_prod = 0;
5509 fp->tx_bd_cons = 0;
5510 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5511 fp->tx_pkt = 0;
5512 }
5513}
5514
5515static void bnx2x_init_sp_ring(struct bnx2x *bp)
5516{
34f80b04 5517 int func = BP_FUNC(bp);
5518
5519 spin_lock_init(&bp->spq_lock);
5520
5521 bp->spq_left = MAX_SPQ_PENDING;
5522 bp->spq_prod_idx = 0;
5523 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5524 bp->spq_prod_bd = bp->spq;
5525 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5526
34f80b04 5527 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5528 U64_LO(bp->spq_mapping));
5529 REG_WR(bp,
5530 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5531 U64_HI(bp->spq_mapping));
5532
34f80b04 5533 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5534 bp->spq_prod_idx);
5535}
5536
5537static void bnx2x_init_context(struct bnx2x *bp)
5538{
5539 int i;
5540
5541 /* Rx */
5542 for_each_queue(bp, i) {
5543 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5544 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5545 u8 cl_id = fp->cl_id;
a2fbb9ea 5546
5547 context->ustorm_st_context.common.sb_index_numbers =
5548 BNX2X_RX_SB_INDEX_NUM;
0626b899 5549 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5550 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5551 context->ustorm_st_context.common.flags =
5552 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5553 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5554 context->ustorm_st_context.common.statistics_counter_id =
5555 cl_id;
8d9c5f34 5556 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5557 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5558 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5559 bp->rx_buf_size;
34f80b04 5560 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5561 U64_HI(fp->rx_desc_mapping);
34f80b04 5562 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5563 U64_LO(fp->rx_desc_mapping);
5564 if (!fp->disable_tpa) {
5565 context->ustorm_st_context.common.flags |=
ca00392c 5566 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5567 context->ustorm_st_context.common.sge_buff_size =
5568 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5569 0xffff);
5570 context->ustorm_st_context.common.sge_page_base_hi =
5571 U64_HI(fp->rx_sge_mapping);
5572 context->ustorm_st_context.common.sge_page_base_lo =
5573 U64_LO(fp->rx_sge_mapping);
5574
5575 context->ustorm_st_context.common.max_sges_for_packet =
5576 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5577 context->ustorm_st_context.common.max_sges_for_packet =
5578 ((context->ustorm_st_context.common.
5579 max_sges_for_packet + PAGES_PER_SGE - 1) &
5580 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5581 }
5582
5583 context->ustorm_ag_context.cdu_usage =
5584 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5585 CDU_REGION_NUMBER_UCM_AG,
5586 ETH_CONNECTION_TYPE);
5587
5588 context->xstorm_ag_context.cdu_reserved =
5589 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5590 CDU_REGION_NUMBER_XCM_AG,
5591 ETH_CONNECTION_TYPE);
5592 }
5593
5594 /* Tx */
5595 for_each_queue(bp, i) {
5596 struct bnx2x_fastpath *fp = &bp->fp[i];
5597 struct eth_context *context =
54b9ddaa 5598 bnx2x_sp(bp, context[i].eth);
5599
5600 context->cstorm_st_context.sb_index_number =
5601 C_SB_ETH_TX_CQ_INDEX;
5602 context->cstorm_st_context.status_block_id = fp->sb_id;
5603
5604 context->xstorm_st_context.tx_bd_page_base_hi =
5605 U64_HI(fp->tx_desc_mapping);
5606 context->xstorm_st_context.tx_bd_page_base_lo =
5607 U64_LO(fp->tx_desc_mapping);
ca00392c 5608 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5609 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5610 }
5611}
5612
5613static void bnx2x_init_ind_table(struct bnx2x *bp)
5614{
26c8fa4d 5615 int func = BP_FUNC(bp);
5616 int i;
5617
555f6c78 5618 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5619 return;
5620
5621 DP(NETIF_MSG_IFUP,
5622 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5623 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5624 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5625 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 5626 bp->fp->cl_id + (i % bp->num_queues));
5627}
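
/* Illustrative sketch (standalone, not driver code): the TSTORM
 * indirection-table fill above reduces to a round-robin spread of the
 * table slots over the active queues, offset by the leading client id.
 */
static void sk_fill_ind_table(unsigned char *tbl, int tbl_size,
                              unsigned char base_cl_id, int num_queues)
{
        int i;

        for (i = 0; i < tbl_size; i++)
                tbl[i] = base_cl_id + (i % num_queues);
}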
5628
5629static void bnx2x_set_client_config(struct bnx2x *bp)
5630{
49d66772 5631 struct tstorm_eth_client_config tstorm_client = {0};
5632 int port = BP_PORT(bp);
5633 int i;
49d66772 5634
e7799c5f 5635 tstorm_client.mtu = bp->dev->mtu;
49d66772 5636 tstorm_client.config_flags =
5637 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5638 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5639#ifdef BCM_VLAN
0c6671b0 5640 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5641 tstorm_client.config_flags |=
8d9c5f34 5642 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5643 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5644 }
5645#endif
5646
5647 for_each_queue(bp, i) {
5648 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5649
49d66772 5650 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5651 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5652 ((u32 *)&tstorm_client)[0]);
5653 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5654 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5655 ((u32 *)&tstorm_client)[1]);
5656 }
5657
5658 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5659 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5660}
5661
5662static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5663{
a2fbb9ea 5664 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5665 int mode = bp->rx_mode;
37b091ba 5666 int mask = bp->rx_mode_cl_mask;
34f80b04 5667 int func = BP_FUNC(bp);
581ce43d 5668 int port = BP_PORT(bp);
a2fbb9ea 5669 int i;
5670 /* All but management unicast packets should pass to the host as well */
5671 u32 llh_mask =
5672 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5673 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5674 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5675 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5676
3196a88a 5677 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5678
5679 switch (mode) {
5680 case BNX2X_RX_MODE_NONE: /* no Rx */
5681 tstorm_mac_filter.ucast_drop_all = mask;
5682 tstorm_mac_filter.mcast_drop_all = mask;
5683 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5684 break;
356e2385 5685
a2fbb9ea 5686 case BNX2X_RX_MODE_NORMAL:
34f80b04 5687 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5688 break;
356e2385 5689
a2fbb9ea 5690 case BNX2X_RX_MODE_ALLMULTI:
5691 tstorm_mac_filter.mcast_accept_all = mask;
5692 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5693 break;
356e2385 5694
a2fbb9ea 5695 case BNX2X_RX_MODE_PROMISC:
5696 tstorm_mac_filter.ucast_accept_all = mask;
5697 tstorm_mac_filter.mcast_accept_all = mask;
5698 tstorm_mac_filter.bcast_accept_all = mask;
5699 /* pass management unicast packets as well */
5700 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5701 break;
356e2385 5702
a2fbb9ea 5703 default:
5704 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5705 break;
5706 }
5707
5708 REG_WR(bp,
5709 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5710 llh_mask);
5711
5712 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5713 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5714 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5715 ((u32 *)&tstorm_mac_filter)[i]);
5716
34f80b04 5717/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5718 ((u32 *)&tstorm_mac_filter)[i]); */
5719 }
a2fbb9ea 5720
5721 if (mode != BNX2X_RX_MODE_NONE)
5722 bnx2x_set_client_config(bp);
5723}
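
/* Illustrative sketch (standalone, not driver code): the switch above
 * boils down to this mode -> accept/drop mapping, with each flag then
 * replicated per client via the mask. Plain ints stand in for the tstorm
 * MAC filter config fields.
 */
enum sk_rx_mode { SK_RX_NONE, SK_RX_NORMAL, SK_RX_ALLMULTI, SK_RX_PROMISC };

struct sk_filter {
        int drop_all;           /* BNX2X_RX_MODE_NONE: drop everything */
        int ucast_accept_all;
        int mcast_accept_all;
        int bcast_accept_all;
};

static struct sk_filter sk_mode_to_filter(enum sk_rx_mode mode)
{
        struct sk_filter f = {0, 0, 0, 0};

        switch (mode) {
        case SK_RX_NONE:
                f.drop_all = 1;
                break;
        case SK_RX_NORMAL:      /* unicast still filtered by the MAC CAM */
                f.bcast_accept_all = 1;
                break;
        case SK_RX_ALLMULTI:
                f.mcast_accept_all = 1;
                f.bcast_accept_all = 1;
                break;
        case SK_RX_PROMISC:
                f.ucast_accept_all = 1;
                f.mcast_accept_all = 1;
                f.bcast_accept_all = 1;
                break;
        }
        return f;
}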
5724
5725static void bnx2x_init_internal_common(struct bnx2x *bp)
5726{
5727 int i;
5728
5729 /* Zero this manually as its initialization is
5730 currently missing in the initTool */
5731 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5732 REG_WR(bp, BAR_USTRORM_INTMEM +
5733 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5734}
5735
5736static void bnx2x_init_internal_port(struct bnx2x *bp)
5737{
5738 int port = BP_PORT(bp);
5739
5740 REG_WR(bp,
5741 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5742 REG_WR(bp,
5743 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5744 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5745 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5746}
5747
5748static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5749{
5750 struct tstorm_eth_function_common_config tstorm_config = {0};
5751 struct stats_indication_flags stats_flags = {0};
5752 int port = BP_PORT(bp);
5753 int func = BP_FUNC(bp);
5754 int i, j;
5755 u32 offset;
471de716 5756 u16 max_agg_size;
a2fbb9ea 5757
5758 tstorm_config.config_flags = RSS_FLAGS(bp);
5759
5760 if (is_multi(bp))
a2fbb9ea 5761 tstorm_config.rss_result_mask = MULTI_MASK;
5762
5763 /* Enable TPA if needed */
5764 if (bp->flags & TPA_ENABLE_FLAG)
5765 tstorm_config.config_flags |=
5766 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5767
5768 if (IS_E1HMF(bp))
5769 tstorm_config.config_flags |=
5770 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5771
5772 tstorm_config.leading_client_id = BP_L_ID(bp);
5773
a2fbb9ea 5774 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5775 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5776 (*(u32 *)&tstorm_config));
5777
c14423fe 5778 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5779 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5780 bnx2x_set_storm_rx_mode(bp);
5781
5782 for_each_queue(bp, i) {
5783 u8 cl_id = bp->fp[i].cl_id;
5784
5785 /* reset xstorm per client statistics */
5786 offset = BAR_XSTRORM_INTMEM +
5787 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5788 for (j = 0;
5789 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5790 REG_WR(bp, offset + j*4, 0);
5791
5792 /* reset tstorm per client statistics */
5793 offset = BAR_TSTRORM_INTMEM +
5794 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5795 for (j = 0;
5796 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5797 REG_WR(bp, offset + j*4, 0);
5798
5799 /* reset ustorm per client statistics */
5800 offset = BAR_USTRORM_INTMEM +
5801 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5802 for (j = 0;
5803 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5804 REG_WR(bp, offset + j*4, 0);
5805 }
5806
5807 /* Init statistics related context */
34f80b04 5808 stats_flags.collect_eth = 1;
a2fbb9ea 5809
66e855f3 5810 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5811 ((u32 *)&stats_flags)[0]);
66e855f3 5812 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5813 ((u32 *)&stats_flags)[1]);
5814
66e855f3 5815 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5816 ((u32 *)&stats_flags)[0]);
66e855f3 5817 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5818 ((u32 *)&stats_flags)[1]);
5819
5820 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5821 ((u32 *)&stats_flags)[0]);
5822 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5823 ((u32 *)&stats_flags)[1]);
5824
66e855f3 5825 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5826 ((u32 *)&stats_flags)[0]);
66e855f3 5827 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5828 ((u32 *)&stats_flags)[1]);
5829
5830 REG_WR(bp, BAR_XSTRORM_INTMEM +
5831 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833 REG_WR(bp, BAR_XSTRORM_INTMEM +
5834 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5836
5837 REG_WR(bp, BAR_TSTRORM_INTMEM +
5838 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840 REG_WR(bp, BAR_TSTRORM_INTMEM +
5841 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5843
5844 REG_WR(bp, BAR_USTRORM_INTMEM +
5845 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5846 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5847 REG_WR(bp, BAR_USTRORM_INTMEM +
5848 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5849 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5850
5851 if (CHIP_IS_E1H(bp)) {
5852 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5853 IS_E1HMF(bp));
5854 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5855 IS_E1HMF(bp));
5856 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5857 IS_E1HMF(bp));
5858 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5859 IS_E1HMF(bp));
5860
5861 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5862 bp->e1hov);
5863 }
5864
4f40f2cb 5865 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5866 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5867 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
54b9ddaa 5868 for_each_queue(bp, i) {
7a9b2557 5869 struct bnx2x_fastpath *fp = &bp->fp[i];
5870
5871 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5872 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5873 U64_LO(fp->rx_comp_mapping));
5874 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5875 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5876 U64_HI(fp->rx_comp_mapping));
5877
5878 /* Next page */
5879 REG_WR(bp, BAR_USTRORM_INTMEM +
5880 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5881 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5882 REG_WR(bp, BAR_USTRORM_INTMEM +
5883 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5884 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5885
7a9b2557 5886 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5887 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5888 max_agg_size);
5889 }
8a1c38d1 5890
5891 /* dropless flow control */
5892 if (CHIP_IS_E1H(bp)) {
5893 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5894
5895 rx_pause.bd_thr_low = 250;
5896 rx_pause.cqe_thr_low = 250;
5897 rx_pause.cos = 1;
5898 rx_pause.sge_thr_low = 0;
5899 rx_pause.bd_thr_high = 350;
5900 rx_pause.cqe_thr_high = 350;
5901 rx_pause.sge_thr_high = 0;
5902
54b9ddaa 5903 for_each_queue(bp, i) {
5904 struct bnx2x_fastpath *fp = &bp->fp[i];
5905
5906 if (!fp->disable_tpa) {
5907 rx_pause.sge_thr_low = 150;
5908 rx_pause.sge_thr_high = 250;
5909 }
5910
5911
5912 offset = BAR_USTRORM_INTMEM +
5913 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5914 fp->cl_id);
5915 for (j = 0;
5916 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5917 j++)
5918 REG_WR(bp, offset + j*4,
5919 ((u32 *)&rx_pause)[j]);
5920 }
5921 }
5922
5923 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5924
5925 /* Init rate shaping and fairness contexts */
5926 if (IS_E1HMF(bp)) {
5927 int vn;
5928
5929 /* During init there is no active link
5930 Until link is up, set link rate to 10Gbps */
5931 bp->link_vars.line_speed = SPEED_10000;
5932 bnx2x_init_port_minmax(bp);
5933
5934 if (!BP_NOMCP(bp))
5935 bp->mf_config =
5936 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5937 bnx2x_calc_vn_weight_sum(bp);
5938
5939 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5940 bnx2x_init_vn_minmax(bp, 2*vn + port);
5941
5942 /* Enable rate shaping and fairness */
b015e3d1 5943 bp->cmng.flags.cmng_enables |=
8a1c38d1 5944 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5945
5946 } else {
5947 /* rate shaping and fairness are disabled */
5948 DP(NETIF_MSG_IFUP,
5949 "single function mode minmax will be disabled\n");
5950 }
5951
5952
cdaa7cb8 5953 /* Store cmng structures to internal memory */
5954 if (bp->port.pmf)
5955 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5956 REG_WR(bp, BAR_XSTRORM_INTMEM +
5957 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5958 ((u32 *)(&bp->cmng))[i]);
5959}
5960
5961static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5962{
5963 switch (load_code) {
5964 case FW_MSG_CODE_DRV_LOAD_COMMON:
5965 bnx2x_init_internal_common(bp);
5966 /* no break */
5967
5968 case FW_MSG_CODE_DRV_LOAD_PORT:
5969 bnx2x_init_internal_port(bp);
5970 /* no break */
5971
5972 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5973 bnx2x_init_internal_func(bp);
5974 break;
5975
5976 default:
5977 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5978 break;
5979 }
5980}
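
/* Illustrative sketch (standalone, not driver code): the missing breaks
 * above are deliberate - the load codes form a strict superset chain,
 * COMMON doing common + port + function init, PORT doing port + function,
 * FUNCTION only its own. The same cascade written without fall-through:
 */
enum { SK_LOAD_COMMON, SK_LOAD_PORT, SK_LOAD_FUNCTION };

static void sk_init_common(void) { /* chip-wide init */ }
static void sk_init_port(void)   { /* per-port init */ }
static void sk_init_func(void)   { /* per-function init */ }

static void sk_init_cascade(int load_code)
{
        if (load_code == SK_LOAD_COMMON)
                sk_init_common();
        if (load_code != SK_LOAD_FUNCTION)
                sk_init_port();
        sk_init_func();         /* every load level reaches here */
}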
5981
5982static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5983{
5984 int i;
5985
5986 for_each_queue(bp, i) {
5987 struct bnx2x_fastpath *fp = &bp->fp[i];
5988
34f80b04 5989 fp->bp = bp;
a2fbb9ea 5990 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5991 fp->index = i;
34f80b04 5992 fp->cl_id = BP_L_ID(bp) + i;
5993#ifdef BCM_CNIC
5994 fp->sb_id = fp->cl_id + 1;
5995#else
34f80b04 5996 fp->sb_id = fp->cl_id;
37b091ba 5997#endif
34f80b04 5998 DP(NETIF_MSG_IFUP,
5999 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
6000 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 6001 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 6002 fp->sb_id);
5c862848 6003 bnx2x_update_fpsb_idx(fp);
6004 }
6005
6006 /* ensure status block indices were read */
6007 rmb();
6008
6009
6010 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6011 DEF_SB_ID);
6012 bnx2x_update_dsb_idx(bp);
6013 bnx2x_update_coalesce(bp);
6014 bnx2x_init_rx_rings(bp);
6015 bnx2x_init_tx_ring(bp);
6016 bnx2x_init_sp_ring(bp);
6017 bnx2x_init_context(bp);
471de716 6018 bnx2x_init_internal(bp, load_code);
a2fbb9ea 6019 bnx2x_init_ind_table(bp);
6020 bnx2x_stats_init(bp);
6021
6022 /* At this point, we are ready for interrupts */
6023 atomic_set(&bp->intr_sem, 0);
6024
6025 /* flush all before enabling interrupts */
6026 mb();
6027 mmiowb();
6028
615f8fd9 6029 bnx2x_int_enable(bp);
6030
6031 /* Check for SPIO5 */
6032 bnx2x_attn_int_deasserted0(bp,
6033 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6034 AEU_INPUTS_ATTN_BITS_SPIO5);
6035}
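
/* Illustrative sketch (standalone, not driver code): the mb()/mmiowb()
 * pair above enforces one invariant - every ring and status-block store
 * must be visible before interrupts are unmasked, or the first ISR could
 * run against half-initialized state. A C11 rendering of the same idea:
 */
#include <stdatomic.h>

static volatile unsigned int sk_int_mask = ~0u; /* stand-in for the IGU */

static void sk_int_enable(void)
{
        /* order all prior initialization stores before the unmask write */
        atomic_thread_fence(memory_order_release);
        sk_int_mask = 0;
}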
6036
6037/* end of nic init */
6038
6039/*
6040 * gzip service functions
6041 */
6042
6043static int bnx2x_gunzip_init(struct bnx2x *bp)
6044{
6045 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6046 &bp->gunzip_mapping, GFP_KERNEL);
6047 if (bp->gunzip_buf == NULL)
6048 goto gunzip_nomem1;
6049
6050 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6051 if (bp->strm == NULL)
6052 goto gunzip_nomem2;
6053
6054 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6055 GFP_KERNEL);
6056 if (bp->strm->workspace == NULL)
6057 goto gunzip_nomem3;
6058
6059 return 0;
6060
6061gunzip_nomem3:
6062 kfree(bp->strm);
6063 bp->strm = NULL;
6064
6065gunzip_nomem2:
6066 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6067 bp->gunzip_mapping);
6068 bp->gunzip_buf = NULL;
6069
6070gunzip_nomem1:
6071 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6072 " un-compression\n");
6073 return -ENOMEM;
6074}
6075
6076static void bnx2x_gunzip_end(struct bnx2x *bp)
6077{
6078 kfree(bp->strm->workspace);
6079
6080 kfree(bp->strm);
6081 bp->strm = NULL;
6082
6083 if (bp->gunzip_buf) {
6084 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6085 bp->gunzip_mapping);
6086 bp->gunzip_buf = NULL;
6087 }
6088}
6089
94a78b79 6090static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6091{
6092 int n, rc;
6093
6094 /* check gzip header */
6095 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6096 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 6097 return -EINVAL;
94a78b79 6098 }
6099
6100 n = 10;
6101
34f80b04 6102#define FNAME 0x8
6103
6104 if (zbuf[3] & FNAME)
6105 while ((zbuf[n++] != 0) && (n < len));
6106
94a78b79 6107 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6108 bp->strm->avail_in = len - n;
6109 bp->strm->next_out = bp->gunzip_buf;
6110 bp->strm->avail_out = FW_BUF_SIZE;
6111
6112 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6113 if (rc != Z_OK)
6114 return rc;
6115
6116 rc = zlib_inflate(bp->strm, Z_FINISH);
6117 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6118 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6119 bp->strm->msg);
6120
6121 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6122 if (bp->gunzip_outlen & 0x3)
6123 netdev_err(bp->dev, "Firmware decompression error:"
6124 " gunzip_outlen (%d) not aligned\n",
6125 bp->gunzip_outlen);
6126 bp->gunzip_outlen >>= 2;
6127
6128 zlib_inflateEnd(bp->strm);
6129
6130 if (rc == Z_STREAM_END)
6131 return 0;
6132
6133 return rc;
6134}
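
/* Illustrative sketch (userspace analogue of bnx2x_gunzip(), linked with
 * -lz, not driver code): skip the fixed 10-byte gzip header plus the
 * NUL-terminated file name when FNAME is set, then inflate the raw deflate
 * payload by passing a negative window-bits value. Error handling trimmed
 * to the essentials.
 */
#include <string.h>
#include <zlib.h>

#define SK_GZ_FNAME 0x8

static int sk_gunzip(const unsigned char *zbuf, unsigned int len,
                     unsigned char *out, unsigned int out_len)
{
        z_stream strm;
        unsigned int n = 10;
        int rc;

        if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
            zbuf[2] != Z_DEFLATED)
                return -1;                              /* not gzip */
        if (zbuf[3] & SK_GZ_FNAME)
                while (n < len && zbuf[n++] != 0)       /* skip the name */
                        ;

        memset(&strm, 0, sizeof(strm));
        if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)    /* raw deflate */
                return -1;
        strm.next_in = (unsigned char *)zbuf + n;
        strm.avail_in = len - n;
        strm.next_out = out;
        strm.avail_out = out_len;
        rc = inflate(&strm, Z_FINISH);
        inflateEnd(&strm);
        return (rc == Z_STREAM_END) ? 0 : -1;
}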
6135
6136/* nic load/unload */
6137
6138/*
34f80b04 6139 * General service functions
6140 */
6141
6142/* send a NIG loopback debug packet */
6143static void bnx2x_lb_pckt(struct bnx2x *bp)
6144{
a2fbb9ea 6145 u32 wb_write[3];
6146
6147 /* Ethernet source and destination addresses */
6148 wb_write[0] = 0x55555555;
6149 wb_write[1] = 0x55555555;
34f80b04 6150 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 6151 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6152
6153 /* NON-IP protocol */
6154 wb_write[0] = 0x09000000;
6155 wb_write[1] = 0x55555555;
34f80b04 6156 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 6157 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6158}
6159
6160/* some of the internal memories
6161 * are not directly readable from the driver;
6162 * to test them we send debug packets
6163 */
6164static int bnx2x_int_mem_test(struct bnx2x *bp)
6165{
6166 int factor;
6167 int count, i;
6168 u32 val = 0;
6169
ad8d3948 6170 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 6171 factor = 120;
6172 else if (CHIP_REV_IS_EMUL(bp))
6173 factor = 200;
6174 else
a2fbb9ea 6175 factor = 1;
6176
6177 DP(NETIF_MSG_HW, "start part1\n");
6178
6179 /* Disable inputs of parser neighbor blocks */
6180 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6181 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6182 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6183 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6184
6185 /* Write 0 to parser credits for CFC search request */
6186 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6187
6188 /* send Ethernet packet */
6189 bnx2x_lb_pckt(bp);
6190
6191 /* TODO: do I reset the NIG statistic? */
6192 /* Wait until NIG register shows 1 packet of size 0x10 */
6193 count = 1000 * factor;
6194 while (count) {
34f80b04 6195
6196 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6197 val = *bnx2x_sp(bp, wb_data[0]);
6198 if (val == 0x10)
6199 break;
6200
6201 msleep(10);
6202 count--;
6203 }
6204 if (val != 0x10) {
6205 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6206 return -1;
6207 }
6208
6209 /* Wait until PRS register shows 1 packet */
6210 count = 1000 * factor;
6211 while (count) {
6212 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6213 if (val == 1)
6214 break;
6215
6216 msleep(10);
6217 count--;
6218 }
6219 if (val != 0x1) {
6220 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6221 return -2;
6222 }
6223
6224 /* Reset and init BRB, PRS */
34f80b04 6225 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 6226 msleep(50);
34f80b04 6227 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 6228 msleep(50);
6229 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6230 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6231
6232 DP(NETIF_MSG_HW, "part2\n");
6233
6234 /* Disable inputs of parser neighbor blocks */
6235 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6236 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6237 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6238 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6239
6240 /* Write 0 to parser credits for CFC search request */
6241 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6242
6243 /* send 10 Ethernet packets */
6244 for (i = 0; i < 10; i++)
6245 bnx2x_lb_pckt(bp);
6246
6247 /* Wait until NIG register shows 10 + 1
6248 packets of size 11*0x10 = 0xb0 */
6249 count = 1000 * factor;
6250 while (count) {
34f80b04 6251
6252 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6253 val = *bnx2x_sp(bp, wb_data[0]);
6254 if (val == 0xb0)
6255 break;
6256
6257 msleep(10);
6258 count--;
6259 }
6260 if (val != 0xb0) {
6261 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6262 return -3;
6263 }
6264
6265 /* Wait until PRS register shows 2 packets */
6266 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6267 if (val != 2)
6268 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6269
6270 /* Write 1 to parser credits for CFC search request */
6271 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6272
6273 /* Wait until PRS register shows 3 packets */
6274 msleep(10 * factor);
6275 /* Wait until NIG register shows 1 packet of size 0x10 */
6276 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6277 if (val != 3)
6278 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6279
6280 /* clear NIG EOP FIFO */
6281 for (i = 0; i < 11; i++)
6282 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6283 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6284 if (val != 1) {
6285 BNX2X_ERR("clear of NIG failed\n");
6286 return -4;
6287 }
6288
6289 /* Reset and init BRB, PRS, NIG */
6290 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6291 msleep(50);
6292 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6293 msleep(50);
6294 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6295 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 6296#ifndef BCM_CNIC
6297 /* set NIC mode */
6298 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6299#endif
6300
6301 /* Enable inputs of parser neighbor blocks */
6302 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6303 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6304 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 6305 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6306
6307 DP(NETIF_MSG_HW, "done\n");
6308
6309 return 0; /* OK */
6310}
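
/* Illustrative sketch (standalone, not driver code): the self test repeats
 * one poll-until-match pattern, scaled by a platform speed factor so the
 * same loop covers ASIC (factor 1), FPGA (120) and emulation (200).
 * Generic form of that loop:
 */
typedef unsigned int (*sk_read_fn)(void *ctx);

static int sk_poll_for(sk_read_fn read_reg, void *ctx, unsigned int expect,
                       int base_iters, int factor, void (*delay10ms)(void))
{
        int count = base_iters * factor;

        while (count--) {
                if (read_reg(ctx) == expect)
                        return 0;
                delay10ms();    /* msleep(10) in the driver */
        }
        return -1;              /* timed out */
}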
6311
6312static void enable_blocks_attention(struct bnx2x *bp)
6313{
6314 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6315 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6316 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6317 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6318 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6319 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6320 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6321 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6322 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6323/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6324/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6325 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6326 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6327 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6328/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6329/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6330 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6331 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6332 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6333 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6334/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6335/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6336 if (CHIP_REV_IS_FPGA(bp))
6337 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6338 else
6339 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6340 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6341 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6342 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6343/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6344/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6345 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6346 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6347/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6348 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
6349}
6350
6351static const struct {
6352 u32 addr;
6353 u32 mask;
6354} bnx2x_parity_mask[] = {
6355 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6356 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6357 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6358 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6359 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6360 {QM_REG_QM_PRTY_MASK, 0x0},
6361 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6362 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6363 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6364 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6365 {CDU_REG_CDU_PRTY_MASK, 0x0},
6366 {CFC_REG_CFC_PRTY_MASK, 0x0},
6367 {DBG_REG_DBG_PRTY_MASK, 0x0},
6368 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6369 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6370 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6371 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6372 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6373 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6374 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6375 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6376 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6377 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6378 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6379 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6380 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6381 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6382 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6383};
6384
6385static void enable_blocks_parity(struct bnx2x *bp)
6386{
6387 int i, mask_arr_len =
6388 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6389
6390 for (i = 0; i < mask_arr_len; i++)
6391 REG_WR(bp, bnx2x_parity_mask[i].addr,
6392 bnx2x_parity_mask[i].mask);
6393}
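
/* Illustrative sketch (standalone, not driver code): mask_arr_len above is
 * the classic sizeof(arr)/sizeof(arr[0]) element count (ARRAY_SIZE() in
 * kernel style); the walk itself is a plain table-driven register init.
 */
#define SK_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct sk_reg_mask {
        unsigned int addr;
        unsigned int mask;
};

static void sk_apply_masks(void (*reg_wr)(unsigned int, unsigned int),
                           const struct sk_reg_mask *tbl, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                reg_wr(tbl[i].addr, tbl[i].mask);
}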
6394
34f80b04 6395
6396static void bnx2x_reset_common(struct bnx2x *bp)
6397{
6398 /* reset_common */
6399 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6400 0xd3ffff7f);
6401 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6402}
6403
6404static void bnx2x_init_pxp(struct bnx2x *bp)
6405{
6406 u16 devctl;
6407 int r_order, w_order;
6408
6409 pci_read_config_word(bp->pdev,
6410 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6411 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6412 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6413 if (bp->mrrs == -1)
6414 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6415 else {
6416 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6417 r_order = bp->mrrs;
6418 }
6419
6420 bnx2x_init_pxp_arb(bp, r_order, w_order);
6421}
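
/* Illustrative sketch (standalone, not driver code): decoding the two
 * Device Control fields read above. Per the PCIe spec the max payload size
 * sits in bits 7:5 and the max read request size in bits 14:12, each
 * encoding 128 << value bytes - which is what the ">> 5" / ">> 12" shifts
 * extract.
 */
static unsigned int sk_mps_bytes(unsigned short devctl)
{
        return 128u << ((devctl & 0x00e0) >> 5);        /* max payload */
}

static unsigned int sk_mrrs_bytes(unsigned short devctl)
{
        return 128u << ((devctl & 0x7000) >> 12);       /* max read request */
}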
6422
6423static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6424{
2145a920 6425 int is_required;
fd4ef40d 6426 u32 val;
2145a920 6427 int port;
fd4ef40d 6428
6429 if (BP_NOMCP(bp))
6430 return;
6431
6432 is_required = 0;
6433 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6434 SHARED_HW_CFG_FAN_FAILURE_MASK;
6435
6436 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6437 is_required = 1;
6438
6439 /*
6440 * The fan failure mechanism is usually related to the PHY type since
6441 * the power consumption of the board is affected by the PHY. Currently,
6442 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6443 */
6444 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6445 for (port = PORT_0; port < PORT_MAX; port++) {
6446 u32 phy_type =
6447 SHMEM_RD(bp, dev_info.port_hw_config[port].
6448 external_phy_config) &
6449 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6450 is_required |=
6451 ((phy_type ==
6452 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6453 (phy_type ==
6454 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6455 (phy_type ==
6456 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6457 }
6458
6459 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6460
6461 if (is_required == 0)
6462 return;
6463
6464 /* Fan failure is indicated by SPIO 5 */
6465 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6466 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6467
6468 /* set to active low mode */
6469 val = REG_RD(bp, MISC_REG_SPIO_INT);
6470 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 6471 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6472 REG_WR(bp, MISC_REG_SPIO_INT, val);
6473
6474 /* enable interrupt to signal the IGU */
6475 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6476 val |= (1 << MISC_REGISTERS_SPIO_5);
6477 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6478}
6479
34f80b04 6480static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6481{
a2fbb9ea 6482 u32 val, i;
6483#ifdef BCM_CNIC
6484 u32 wb_write[2];
6485#endif
a2fbb9ea 6486
34f80b04 6487 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6488
81f75bbf 6489 bnx2x_reset_common(bp);
6490 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6491 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6492
94a78b79 6493 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6494 if (CHIP_IS_E1H(bp))
6495 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6496
6497 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6498 msleep(30);
6499 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6500
94a78b79 6501 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6502 if (CHIP_IS_E1(bp)) {
6503 /* enable HW interrupt from PXP on USDM overflow
6504 bit 16 on INT_MASK_0 */
6505 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6506 }
a2fbb9ea 6507
94a78b79 6508 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6509 bnx2x_init_pxp(bp);
6510
6511#ifdef __BIG_ENDIAN
6512 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6513 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6514 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6515 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6516 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6517 /* make sure this value is 0 */
6518 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6519
6520/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6521 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6522 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6523 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6524 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6525#endif
6526
34f80b04 6527 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6528#ifdef BCM_CNIC
6529 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6530 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6531 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6532#endif
6533
6534 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6535 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6536
6537 /* let the HW do its magic ... */
6538 msleep(100);
6539 /* finish PXP init */
6540 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6541 if (val != 1) {
6542 BNX2X_ERR("PXP2 CFG failed\n");
6543 return -EBUSY;
6544 }
6545 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6546 if (val != 1) {
6547 BNX2X_ERR("PXP2 RD_INIT failed\n");
6548 return -EBUSY;
6549 }
a2fbb9ea 6550
6551 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6552 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6553
94a78b79 6554 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6555
6556 /* clean the DMAE memory */
6557 bp->dmae_ready = 1;
6558 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6559
6560 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6561 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6562 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6563 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6564
6565 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6566 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6567 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6568 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6569
94a78b79 6570 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6571
6572#ifdef BCM_CNIC
6573 wb_write[0] = 0;
6574 wb_write[1] = 0;
6575 for (i = 0; i < 64; i++) {
6576 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6577 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6578
6579 if (CHIP_IS_E1H(bp)) {
6580 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6581 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6582 wb_write, 2);
6583 }
6584 }
6585#endif
6586 /* soft reset pulse */
6587 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6588 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6589
37b091ba 6590#ifdef BCM_CNIC
94a78b79 6591 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6592#endif
a2fbb9ea 6593
94a78b79 6594 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6595 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6596 if (!CHIP_REV_IS_SLOW(bp)) {
6597 /* enable hw interrupt from doorbell Q */
6598 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6599 }
a2fbb9ea 6600
6601 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6602 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6603 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6604#ifndef BCM_CNIC
6605 /* set NIC mode */
6606 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6607#endif
6608 if (CHIP_IS_E1H(bp))
6609 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6610
6611 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6612 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6613 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6614 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6615
6616 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6617 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6618 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6619 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6620
6621 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6622 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6623 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6624 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6625
6626 /* sync semi rtc */
6627 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6628 0x80000000);
6629 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6630 0x80000000);
a2fbb9ea 6631
6632 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6633 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6634 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6635
34f80b04 6636 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6637 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6638 REG_WR(bp, i, random32());
94a78b79 6639 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6640#ifdef BCM_CNIC
6641 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6642 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6643 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6644 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6645 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6646 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6647 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6648 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6649 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6650 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6651#endif
34f80b04 6652 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6653
34f80b04
EG
6654 if (sizeof(union cdu_context) != 1024)
6655 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
6656 dev_alert(&bp->pdev->dev, "please adjust the size "
6657 "of cdu_context(%ld)\n",
7995c64e 6658 (long)sizeof(union cdu_context));
a2fbb9ea 6659
94a78b79 6660 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6661 val = (4 << 24) + (0 << 12) + 1024;
6662 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6663
94a78b79 6664 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6665 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6666 /* enable context validation interrupt from CFC */
6667 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6668
6669 /* set the thresholds to prevent CFC/CDU race */
6670 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6671
94a78b79
VZ
6672 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6673 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6674
94a78b79 6675 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6676 /* Reset PCIE errors for debug */
6677 REG_WR(bp, 0x2814, 0xffffffff);
6678 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6679
94a78b79 6680 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6681 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6682 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6683 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6684
94a78b79 6685 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6686 if (CHIP_IS_E1H(bp)) {
6687 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6688 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6689 }
6690
6691 if (CHIP_REV_IS_SLOW(bp))
6692 msleep(200);
6693
6694 /* finish CFC init */
6695 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6696 if (val != 1) {
6697 BNX2X_ERR("CFC LL_INIT failed\n");
6698 return -EBUSY;
6699 }
6700 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6701 if (val != 1) {
6702 BNX2X_ERR("CFC AC_INIT failed\n");
6703 return -EBUSY;
6704 }
6705 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6706 if (val != 1) {
6707 BNX2X_ERR("CFC CAM_INIT failed\n");
6708 return -EBUSY;
6709 }
6710 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6711
34f80b04
EG
6712	/* read NIG statistic
6713	   to see if this is our first bring-up since power-up */
6714 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6715 val = *bnx2x_sp(bp, wb_data[0]);
6716
6717 /* do internal memory self test */
6718 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6719 BNX2X_ERR("internal mem self test failed\n");
6720 return -EBUSY;
6721 }
6722
35b19ba5 6723 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6724 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6725 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6726 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6728 bp->port.need_hw_lock = 1;
6729 break;
6730
34f80b04
EG
6731 default:
6732 break;
6733 }
f1410647 6734
fd4ef40d
EG
6735 bnx2x_setup_fan_failure_detection(bp);
6736
34f80b04
EG
6737 /* clear PXP2 attentions */
6738 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6739
34f80b04 6740 enable_blocks_attention(bp);
72fd0718
VZ
6741 if (CHIP_PARITY_SUPPORTED(bp))
6742 enable_blocks_parity(bp);
a2fbb9ea 6743
6bbca910
YR
6744 if (!BP_NOMCP(bp)) {
6745 bnx2x_acquire_phy_lock(bp);
6746 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6747 bnx2x_release_phy_lock(bp);
6748 } else
6749 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6750
34f80b04
EG
6751 return 0;
6752}
a2fbb9ea 6753
34f80b04
EG
6754static int bnx2x_init_port(struct bnx2x *bp)
6755{
6756 int port = BP_PORT(bp);
94a78b79 6757 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6758 u32 low, high;
34f80b04 6759 u32 val;
a2fbb9ea 6760
cdaa7cb8 6761 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
6762
6763 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6764
94a78b79 6765 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6766 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6767
6768 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6769 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6770 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6771 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6772
37b091ba
MC
6773#ifdef BCM_CNIC
6774 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6775
94a78b79 6776 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6777 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6778 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6779#endif
cdaa7cb8 6780
94a78b79 6781 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6782
94a78b79 6783 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6784 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6785 /* no pause for emulation and FPGA */
6786 low = 0;
6787 high = 513;
6788 } else {
6789 if (IS_E1HMF(bp))
6790 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6791 else if (bp->dev->mtu > 4096) {
6792 if (bp->flags & ONE_PORT_FLAG)
6793 low = 160;
6794 else {
6795 val = bp->dev->mtu;
6796 /* (24*1024 + val*4)/256 */
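				/* i.e. low = ceil((24*1024 + val*4)/256):
				   96 = 24*1024/256, and the two val terms
				   compute val*4/256 = val/64 rounded up */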
6797 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6798 }
6799 } else
6800 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6801 high = low + 56; /* 14*1024/256 */
6802 }
6803 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6804 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6805
6806
94a78b79 6807 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6808
94a78b79 6809 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6810 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6811 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6812 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6813
94a78b79
VZ
6814 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6815 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6816 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6817 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6818
94a78b79 6819 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6820 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6821
94a78b79 6822 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6823
6824 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6825 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6826
6827 /* update threshold */
34f80b04 6828 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6829 /* update init credit */
34f80b04 6830 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
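	/* threshold and credit are in 16-byte units; 9040 presumably covers
	   an mtu-9000 frame plus header margin (assumption, not documented) */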
a2fbb9ea
ET
6831
6832 /* probe changes */
34f80b04 6833 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6834 msleep(5);
34f80b04 6835 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6836
37b091ba
MC
6837#ifdef BCM_CNIC
6838 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6839#endif
94a78b79 6840 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6841 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6842
6843 if (CHIP_IS_E1(bp)) {
6844 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6845 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6846 }
94a78b79 6847 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6848
94a78b79 6849 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6850	/* init aeu_mask_attn_func_0/1:
6851	 *  - SF mode: bits 3-7 are masked, only bits 0-2 are in use
6852	 *  - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
6853	 *             bits 4-7 are used for "per vn group attention" */
6854 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6855 (IS_E1HMF(bp) ? 0xF7 : 0x7));
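	/* 0xF7 = 11110111b: bits 0-2 plus per-vn bits 4-7 enabled with
	   bit 3 masked (MF); 0x7 enables only bits 0-2 (SF) */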
6856
94a78b79 6857 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6858 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6859 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6860 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6861 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6862
94a78b79 6863 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6864
6865 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6866
6867 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6868 /* 0x2 disable e1hov, 0x1 enable */
6869 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6870 (IS_E1HMF(bp) ? 0x1 : 0x2));
6871
1c06328c
EG
6872 {
6873 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6874 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6875 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6876 }
34f80b04
EG
6877 }
6878
94a78b79 6879 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6880 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6881
35b19ba5 6882 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6883 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6884 {
6885 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6886
6887 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6888 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6889
6890 /* The GPIO should be swapped if the swap register is
6891 set and active */
6892 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6893 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6894
6895 /* Select function upon port-swap configuration */
6896 if (port == 0) {
6897 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6898 aeu_gpio_mask = (swap_val && swap_override) ?
6899 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6900 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6901 } else {
6902 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6903 aeu_gpio_mask = (swap_val && swap_override) ?
6904 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6905 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6906 }
6907 val = REG_RD(bp, offset);
6908 /* add GPIO3 to group */
6909 val |= aeu_gpio_mask;
6910 REG_WR(bp, offset, val);
6911 }
6912 break;
6913
35b19ba5 6914 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6915 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6916 /* add SPIO 5 to group 0 */
4d295db0
EG
6917 {
6918 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6919 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6920 val = REG_RD(bp, reg_addr);
f1410647 6921 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6922 REG_WR(bp, reg_addr, val);
6923 }
f1410647
ET
6924 break;
6925
6926 default:
6927 break;
6928 }
6929
c18487ee 6930 bnx2x__link_reset(bp);
a2fbb9ea 6931
34f80b04
EG
6932 return 0;
6933}
6934
6935#define ILT_PER_FUNC (768/2)
6936#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6937/* the phys address is shifted right 12 bits and a valid (1) bit is
6938   added at the 53rd bit;
6939   then, since this is a wide register(TM),
6940   we split it into two 32-bit writes
6941 */
6942#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
6943#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
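/* worked example (illustrative values only): for addr = 0x123456000,
 * addr >> 12 = 0x123456, so ONCHIP_ADDR1(addr) = 0x00123456 and,
 * since addr >> 44 = 0, ONCHIP_ADDR2(addr) = (1 << 20) = 0x00100000 */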
6944#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
6945#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))
6946
37b091ba
MC
6947#ifdef BCM_CNIC
6948#define CNIC_ILT_LINES 127
6949#define CNIC_CTX_PER_ILT 16
6950#else
34f80b04 6951#define CNIC_ILT_LINES 0
37b091ba 6952#endif
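/* each function thus owns ILT_PER_FUNC = 768/2 = 384 consecutive ILT
 * lines, starting at FUNC_ILT_BASE(func) = func * 384 */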
34f80b04
EG
6953
6954static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6955{
6956 int reg;
6957
6958 if (CHIP_IS_E1H(bp))
6959 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6960 else /* E1 */
6961 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6962
6963 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6964}
6965
6966static int bnx2x_init_func(struct bnx2x *bp)
6967{
6968 int port = BP_PORT(bp);
6969 int func = BP_FUNC(bp);
8badd27a 6970 u32 addr, val;
34f80b04
EG
6971 int i;
6972
cdaa7cb8 6973 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 6974
8badd27a
EG
6975 /* set MSI reconfigure capability */
6976 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6977 val = REG_RD(bp, addr);
6978 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6979 REG_WR(bp, addr, val);
6980
34f80b04
EG
6981 i = FUNC_ILT_BASE(func);
6982
6983 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6984 if (CHIP_IS_E1H(bp)) {
6985 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6986 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6987 } else /* E1 */
6988 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6989 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6990
37b091ba
MC
6991#ifdef BCM_CNIC
6992 i += 1 + CNIC_ILT_LINES;
6993 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6994 if (CHIP_IS_E1(bp))
6995 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6996 else {
6997 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6998 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6999 }
7000
7001 i++;
7002 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7003 if (CHIP_IS_E1(bp))
7004 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7005 else {
7006 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7007 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7008 }
7009
7010 i++;
7011 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7012 if (CHIP_IS_E1(bp))
7013 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7014 else {
7015 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7016 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7017 }
7018
7019 /* tell the searcher where the T2 table is */
7020 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7021
7022 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7023 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7024
7025 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7026 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7027 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7028
7029 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7030#endif
34f80b04
EG
7031
7032 if (CHIP_IS_E1H(bp)) {
573f2035
EG
7033 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7034 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7035 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7036 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7037 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7038 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7039 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7040 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7041 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
7042
7043 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7044 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7045 }
7046
7047 /* HC init per function */
7048 if (CHIP_IS_E1H(bp)) {
7049 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7050
7051 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7052 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7053 }
94a78b79 7054 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 7055
c14423fe 7056 /* Reset PCIE errors for debug */
a2fbb9ea
ET
7057 REG_WR(bp, 0x2114, 0xffffffff);
7058 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 7059
34f80b04
EG
7060 return 0;
7061}
7062
7063static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7064{
7065 int i, rc = 0;
a2fbb9ea 7066
34f80b04
EG
7067 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7068 BP_FUNC(bp), load_code);
a2fbb9ea 7069
34f80b04
EG
7070 bp->dmae_ready = 0;
7071 mutex_init(&bp->dmae_mutex);
54016b26
EG
7072 rc = bnx2x_gunzip_init(bp);
7073 if (rc)
7074 return rc;
a2fbb9ea 7075
34f80b04
EG
7076 switch (load_code) {
7077 case FW_MSG_CODE_DRV_LOAD_COMMON:
7078 rc = bnx2x_init_common(bp);
7079 if (rc)
7080 goto init_hw_err;
7081 /* no break */
7082
7083 case FW_MSG_CODE_DRV_LOAD_PORT:
7084 bp->dmae_ready = 1;
7085 rc = bnx2x_init_port(bp);
7086 if (rc)
7087 goto init_hw_err;
7088 /* no break */
7089
7090 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7091 bp->dmae_ready = 1;
7092 rc = bnx2x_init_func(bp);
7093 if (rc)
7094 goto init_hw_err;
7095 break;
7096
7097 default:
7098 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7099 break;
7100 }
7101
7102 if (!BP_NOMCP(bp)) {
7103 int func = BP_FUNC(bp);
a2fbb9ea
ET
7104
7105 bp->fw_drv_pulse_wr_seq =
34f80b04 7106 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 7107 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
7108 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7109 }
a2fbb9ea 7110
34f80b04
EG
7111 /* this needs to be done before gunzip end */
7112 bnx2x_zero_def_sb(bp);
7113 for_each_queue(bp, i)
7114 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
7115#ifdef BCM_CNIC
7116 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7117#endif
34f80b04
EG
7118
7119init_hw_err:
7120 bnx2x_gunzip_end(bp);
7121
7122 return rc;
a2fbb9ea
ET
7123}
7124
a2fbb9ea
ET
7125static void bnx2x_free_mem(struct bnx2x *bp)
7126{
7127
7128#define BNX2X_PCI_FREE(x, y, size) \
7129 do { \
7130 if (x) { \
1a983142 7131 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
7132 x = NULL; \
7133 y = 0; \
7134 } \
7135 } while (0)
7136
7137#define BNX2X_FREE(x) \
7138 do { \
7139 if (x) { \
7140 vfree(x); \
7141 x = NULL; \
7142 } \
7143 } while (0)
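/* both helpers NULL the pointer (and zero the DMA handle) after
   freeing, so bnx2x_free_mem() is safe to run on a partially
   allocated bp, as the alloc error path below relies on */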
7144
7145 int i;
7146
7147 /* fastpath */
555f6c78 7148 /* Common */
a2fbb9ea
ET
7149 for_each_queue(bp, i) {
7150
555f6c78 7151 /* status blocks */
a2fbb9ea
ET
7152 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7153 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7154 sizeof(struct host_status_block));
555f6c78
EG
7155 }
7156 /* Rx */
54b9ddaa 7157 for_each_queue(bp, i) {
a2fbb9ea 7158
555f6c78 7159 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7160 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7161 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7162 bnx2x_fp(bp, i, rx_desc_mapping),
7163 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7164
7165 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7166 bnx2x_fp(bp, i, rx_comp_mapping),
7167 sizeof(struct eth_fast_path_rx_cqe) *
7168 NUM_RCQ_BD);
a2fbb9ea 7169
7a9b2557 7170 /* SGE ring */
32626230 7171 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
7172 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7173 bnx2x_fp(bp, i, rx_sge_mapping),
7174 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7175 }
555f6c78 7176 /* Tx */
54b9ddaa 7177 for_each_queue(bp, i) {
555f6c78
EG
7178
7179 /* fastpath tx rings: tx_buf tx_desc */
7180 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7181 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7182 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7183 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7184 }
a2fbb9ea
ET
7185 /* end of fastpath */
7186
7187 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 7188 sizeof(struct host_def_status_block));
a2fbb9ea
ET
7189
7190 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 7191 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7192
37b091ba 7193#ifdef BCM_CNIC
a2fbb9ea
ET
7194 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7195 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7196 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7197 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
7198 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7199 sizeof(struct host_status_block));
a2fbb9ea 7200#endif
7a9b2557 7201 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
7202
7203#undef BNX2X_PCI_FREE
7204#undef BNX2X_FREE
7205}
7206
7207static int bnx2x_alloc_mem(struct bnx2x *bp)
7208{
7209
7210#define BNX2X_PCI_ALLOC(x, y, size) \
7211 do { \
1a983142 7212 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
a2fbb9ea
ET
7213 if (x == NULL) \
7214 goto alloc_mem_err; \
7215 memset(x, 0, size); \
7216 } while (0)
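/* the explicit memset() is kept because dma_alloc_coherent() was not
   guaranteed to return zeroed memory at the time (assumption) */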
7217
7218#define BNX2X_ALLOC(x, size) \
7219 do { \
7220 x = vmalloc(size); \
7221 if (x == NULL) \
7222 goto alloc_mem_err; \
7223 memset(x, 0, size); \
7224 } while (0)
7225
7226 int i;
7227
7228 /* fastpath */
555f6c78 7229 /* Common */
a2fbb9ea
ET
7230 for_each_queue(bp, i) {
7231 bnx2x_fp(bp, i, bp) = bp;
7232
555f6c78 7233 /* status blocks */
a2fbb9ea
ET
7234 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7235 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 7236 sizeof(struct host_status_block));
555f6c78
EG
7237 }
7238 /* Rx */
54b9ddaa 7239 for_each_queue(bp, i) {
a2fbb9ea 7240
555f6c78 7241 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
7242 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7243 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7244 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7245 &bnx2x_fp(bp, i, rx_desc_mapping),
7246 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7247
7248 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7249 &bnx2x_fp(bp, i, rx_comp_mapping),
7250 sizeof(struct eth_fast_path_rx_cqe) *
7251 NUM_RCQ_BD);
7252
7a9b2557
VZ
7253 /* SGE ring */
7254 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7255 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7256 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7257 &bnx2x_fp(bp, i, rx_sge_mapping),
7258 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 7259 }
555f6c78 7260 /* Tx */
54b9ddaa 7261 for_each_queue(bp, i) {
555f6c78 7262
555f6c78
EG
7263 /* fastpath tx rings: tx_buf tx_desc */
7264 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7265 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7266 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7267 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 7268 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 7269 }
a2fbb9ea
ET
7270 /* end of fastpath */
7271
7272 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7273 sizeof(struct host_def_status_block));
7274
7275 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7276 sizeof(struct bnx2x_slowpath));
7277
37b091ba 7278#ifdef BCM_CNIC
a2fbb9ea
ET
7279 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7280
a2fbb9ea
ET
7281	/* allocate the searcher T2 table;
7282	   we allocate 1/4 of the alloc num for T2
7283	   (which is not entered into the ILT) */
7284 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7285
37b091ba 7286 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 7287 for (i = 0; i < 16*1024; i += 64)
37b091ba 7288 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
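	/* reading of the code (not a documented ABI): the last 8 bytes of
	   each 64-byte T2 entry receive the physical address of the next
	   entry, chaining the table into a free list for the searcher */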
a2fbb9ea 7289
37b091ba 7290	/* Timer block array (8 bytes * MAX_CONN), phys uncached; for now 1024 conns */
a2fbb9ea
ET
7291 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7292
7293 /* QM queues (128*MAX_CONN) */
7294 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
7295
7296 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7297 sizeof(struct host_status_block));
a2fbb9ea
ET
7298#endif
7299
7300 /* Slow path ring */
7301 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7302
7303 return 0;
7304
7305alloc_mem_err:
7306 bnx2x_free_mem(bp);
7307 return -ENOMEM;
7308
7309#undef BNX2X_PCI_ALLOC
7310#undef BNX2X_ALLOC
7311}
7312
7313static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7314{
7315 int i;
7316
54b9ddaa 7317 for_each_queue(bp, i) {
a2fbb9ea
ET
7318 struct bnx2x_fastpath *fp = &bp->fp[i];
7319
7320 u16 bd_cons = fp->tx_bd_cons;
7321 u16 sw_prod = fp->tx_pkt_prod;
7322 u16 sw_cons = fp->tx_pkt_cons;
7323
a2fbb9ea
ET
7324 while (sw_cons != sw_prod) {
7325 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7326 sw_cons++;
7327 }
7328 }
7329}
7330
7331static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7332{
7333 int i, j;
7334
54b9ddaa 7335 for_each_queue(bp, j) {
a2fbb9ea
ET
7336 struct bnx2x_fastpath *fp = &bp->fp[j];
7337
a2fbb9ea
ET
7338 for (i = 0; i < NUM_RX_BD; i++) {
7339 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7340 struct sk_buff *skb = rx_buf->skb;
7341
7342 if (skb == NULL)
7343 continue;
7344
1a983142
FT
7345 dma_unmap_single(&bp->pdev->dev,
7346 dma_unmap_addr(rx_buf, mapping),
7347 bp->rx_buf_size, DMA_FROM_DEVICE);
a2fbb9ea
ET
7348
7349 rx_buf->skb = NULL;
7350 dev_kfree_skb(skb);
7351 }
7a9b2557 7352 if (!fp->disable_tpa)
32626230
EG
7353 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7354 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 7355 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
7356 }
7357}
7358
7359static void bnx2x_free_skbs(struct bnx2x *bp)
7360{
7361 bnx2x_free_tx_skbs(bp);
7362 bnx2x_free_rx_skbs(bp);
7363}
7364
7365static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7366{
34f80b04 7367 int i, offset = 1;
a2fbb9ea
ET
7368
7369 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 7370 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
7371 bp->msix_table[0].vector);
7372
37b091ba
MC
7373#ifdef BCM_CNIC
7374 offset++;
7375#endif
a2fbb9ea 7376 for_each_queue(bp, i) {
c14423fe 7377 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 7378 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
7379 bnx2x_fp(bp, i, state));
7380
34f80b04 7381 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 7382 }
a2fbb9ea
ET
7383}
7384
6cbe5065 7385static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
a2fbb9ea 7386{
a2fbb9ea 7387 if (bp->flags & USING_MSIX_FLAG) {
6cbe5065
VZ
7388 if (!disable_only)
7389 bnx2x_free_msix_irqs(bp);
a2fbb9ea 7390 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
7391 bp->flags &= ~USING_MSIX_FLAG;
7392
8badd27a 7393 } else if (bp->flags & USING_MSI_FLAG) {
6cbe5065
VZ
7394 if (!disable_only)
7395 free_irq(bp->pdev->irq, bp->dev);
8badd27a
EG
7396 pci_disable_msi(bp->pdev);
7397 bp->flags &= ~USING_MSI_FLAG;
7398
6cbe5065 7399 } else if (!disable_only)
a2fbb9ea
ET
7400 free_irq(bp->pdev->irq, bp->dev);
7401}
7402
7403static int bnx2x_enable_msix(struct bnx2x *bp)
7404{
8badd27a
EG
7405 int i, rc, offset = 1;
7406 int igu_vec = 0;
a2fbb9ea 7407
8badd27a
EG
7408 bp->msix_table[0].entry = igu_vec;
7409 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 7410
37b091ba
MC
7411#ifdef BCM_CNIC
7412 igu_vec = BP_L_ID(bp) + offset;
7413 bp->msix_table[1].entry = igu_vec;
7414 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7415 offset++;
7416#endif
34f80b04 7417 for_each_queue(bp, i) {
8badd27a 7418 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7419 bp->msix_table[i + offset].entry = igu_vec;
7420 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7421 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7422 }
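	/* resulting vector layout: msix_table[0] = slowpath, optionally
	   msix_table[1] = CNIC, then one entry per fastpath queue */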
7423
34f80b04 7424 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7425 BNX2X_NUM_QUEUES(bp) + offset);
1ac218c8
VZ
7426
7427 /*
7428 * reconfigure number of tx/rx queues according to available
7429 * MSI-X vectors
7430 */
7431 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7432 /* vectors available for FP */
7433 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7434
7435 DP(NETIF_MSG_IFUP,
7436 "Trying to use less MSI-X vectors: %d\n", rc);
7437
7438 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7439
7440 if (rc) {
7441 DP(NETIF_MSG_IFUP,
7442 "MSI-X is not attainable rc %d\n", rc);
7443 return rc;
7444 }
7445
7446 bp->num_queues = min(bp->num_queues, fp_vec);
7447
7448 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7449 bp->num_queues);
7450 } else if (rc) {
8badd27a
EG
7451 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7452 return rc;
34f80b04 7453 }
8badd27a 7454
a2fbb9ea
ET
7455 bp->flags |= USING_MSIX_FLAG;
7456
7457 return 0;
a2fbb9ea
ET
7458}
7459
a2fbb9ea
ET
7460static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7461{
34f80b04 7462 int i, rc, offset = 1;
a2fbb9ea 7463
a2fbb9ea
ET
7464 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7465 bp->dev->name, bp->dev);
a2fbb9ea
ET
7466 if (rc) {
7467 BNX2X_ERR("request sp irq failed\n");
7468 return -EBUSY;
7469 }
7470
37b091ba
MC
7471#ifdef BCM_CNIC
7472 offset++;
7473#endif
a2fbb9ea 7474 for_each_queue(bp, i) {
555f6c78 7475 struct bnx2x_fastpath *fp = &bp->fp[i];
54b9ddaa
VZ
7476 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7477 bp->dev->name, i);
ca00392c 7478
34f80b04 7479 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7480 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7481 if (rc) {
555f6c78 7482 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7483 bnx2x_free_msix_irqs(bp);
7484 return -EBUSY;
7485 }
7486
555f6c78 7487 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7488 }
7489
555f6c78 7490 i = BNX2X_NUM_QUEUES(bp);
cdaa7cb8
VZ
7491 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7492 " ... fp[%d] %d\n",
7493 bp->msix_table[0].vector,
7494 0, bp->msix_table[offset].vector,
7495 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7496
a2fbb9ea 7497 return 0;
a2fbb9ea
ET
7498}
7499
8badd27a
EG
7500static int bnx2x_enable_msi(struct bnx2x *bp)
7501{
7502 int rc;
7503
7504 rc = pci_enable_msi(bp->pdev);
7505 if (rc) {
7506 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7507 return -1;
7508 }
7509 bp->flags |= USING_MSI_FLAG;
7510
7511 return 0;
7512}
7513
a2fbb9ea
ET
7514static int bnx2x_req_irq(struct bnx2x *bp)
7515{
8badd27a 7516 unsigned long flags;
34f80b04 7517 int rc;
a2fbb9ea 7518
8badd27a
EG
7519 if (bp->flags & USING_MSI_FLAG)
7520 flags = 0;
7521 else
7522 flags = IRQF_SHARED;
7523
7524 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7525 bp->dev->name, bp->dev);
a2fbb9ea
ET
7526 if (!rc)
7527 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7528
7529 return rc;
a2fbb9ea
ET
7530}
7531
65abd74d
YG
7532static void bnx2x_napi_enable(struct bnx2x *bp)
7533{
7534 int i;
7535
54b9ddaa 7536 for_each_queue(bp, i)
65abd74d
YG
7537 napi_enable(&bnx2x_fp(bp, i, napi));
7538}
7539
7540static void bnx2x_napi_disable(struct bnx2x *bp)
7541{
7542 int i;
7543
54b9ddaa 7544 for_each_queue(bp, i)
65abd74d
YG
7545 napi_disable(&bnx2x_fp(bp, i, napi));
7546}
7547
7548static void bnx2x_netif_start(struct bnx2x *bp)
7549{
e1510706
EG
7550 int intr_sem;
7551
7552 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7553 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7554
7555 if (intr_sem) {
65abd74d 7556 if (netif_running(bp->dev)) {
65abd74d
YG
7557 bnx2x_napi_enable(bp);
7558 bnx2x_int_enable(bp);
555f6c78
EG
7559 if (bp->state == BNX2X_STATE_OPEN)
7560 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7561 }
7562 }
7563}
7564
f8ef6e44 7565static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7566{
f8ef6e44 7567 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7568 bnx2x_napi_disable(bp);
762d5f6c 7569 netif_tx_disable(bp->dev);
65abd74d
YG
7570}
7571
a2fbb9ea
ET
7572/*
7573 * Init service functions
7574 */
7575
e665bfda
MC
7576/**
7577 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7578 *
7579 * @param bp driver descriptor
7580 * @param set set or clear an entry (1 or 0)
7581 * @param mac pointer to a buffer containing a MAC
7582 * @param cl_bit_vec bit vector of clients to register a MAC for
7583 * @param cam_offset offset in a CAM to use
7584 * @param with_bcast set broadcast MAC as well
7585 */
7586static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7587 u32 cl_bit_vec, u8 cam_offset,
7588 u8 with_bcast)
a2fbb9ea
ET
7589{
7590 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7591 int port = BP_PORT(bp);
a2fbb9ea
ET
7592
7593 /* CAM allocation
7594 * unicasts 0-31:port0 32-63:port1
7595 * multicast 64-127:port0 128-191:port1
7596 */
e665bfda
MC
7597 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7598 config->hdr.offset = cam_offset;
7599 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7600 config->hdr.reserved1 = 0;
7601
7602 /* primary MAC */
7603 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7604 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7605 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7606 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7607 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7608 swab16(*(u16 *)&mac[4]);
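	/* e.g. on a little-endian host, MAC 00:11:22:33:44:55 yields
	   msb 0x0011, middle 0x2233, lsb 0x4455 */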
34f80b04 7609 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7610 if (set)
7611 config->config_table[0].target_table_entry.flags = 0;
7612 else
7613 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7614 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7615 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7616 config->config_table[0].target_table_entry.vlan_id = 0;
7617
3101c2bc
YG
7618 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7619 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7620 config->config_table[0].cam_entry.msb_mac_addr,
7621 config->config_table[0].cam_entry.middle_mac_addr,
7622 config->config_table[0].cam_entry.lsb_mac_addr);
7623
7624 /* broadcast */
e665bfda
MC
7625 if (with_bcast) {
7626 config->config_table[1].cam_entry.msb_mac_addr =
7627 cpu_to_le16(0xffff);
7628 config->config_table[1].cam_entry.middle_mac_addr =
7629 cpu_to_le16(0xffff);
7630 config->config_table[1].cam_entry.lsb_mac_addr =
7631 cpu_to_le16(0xffff);
7632 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7633 if (set)
7634 config->config_table[1].target_table_entry.flags =
7635 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7636 else
7637 CAM_INVALIDATE(config->config_table[1]);
7638 config->config_table[1].target_table_entry.clients_bit_vector =
7639 cpu_to_le32(cl_bit_vec);
7640 config->config_table[1].target_table_entry.vlan_id = 0;
7641 }
a2fbb9ea
ET
7642
7643 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7644 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7645 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7646}
7647
e665bfda
MC
7648/**
7649 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7650 *
7651 * @param bp driver descriptor
7652 * @param set set or clear an entry (1 or 0)
7653 * @param mac pointer to a buffer containing a MAC
7654 * @param cl_bit_vec bit vector of clients to register a MAC for
7655 * @param cam_offset offset in a CAM to use
7656 */
7657static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7658 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7659{
7660 struct mac_configuration_cmd_e1h *config =
7661 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7662
8d9c5f34 7663 config->hdr.length = 1;
e665bfda
MC
7664 config->hdr.offset = cam_offset;
7665 config->hdr.client_id = 0xff;
34f80b04
EG
7666 config->hdr.reserved1 = 0;
7667
7668 /* primary MAC */
7669 config->config_table[0].msb_mac_addr =
e665bfda 7670 swab16(*(u16 *)&mac[0]);
34f80b04 7671 config->config_table[0].middle_mac_addr =
e665bfda 7672 swab16(*(u16 *)&mac[2]);
34f80b04 7673 config->config_table[0].lsb_mac_addr =
e665bfda 7674 swab16(*(u16 *)&mac[4]);
ca00392c 7675 config->config_table[0].clients_bit_vector =
e665bfda 7676 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7677 config->config_table[0].vlan_id = 0;
7678 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7679 if (set)
7680 config->config_table[0].flags = BP_PORT(bp);
7681 else
7682 config->config_table[0].flags =
7683 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7684
e665bfda 7685 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7686 (set ? "setting" : "clearing"),
34f80b04
EG
7687 config->config_table[0].msb_mac_addr,
7688 config->config_table[0].middle_mac_addr,
e665bfda 7689 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7690
7691 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7692 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7693 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7694}
7695
a2fbb9ea
ET
7696static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7697 int *state_p, int poll)
7698{
7699 /* can take a while if any port is running */
8b3a0f0b 7700 int cnt = 5000;
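	/* 5000 iterations with msleep(1) per pass gives roughly a
	   5 second timeout */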
a2fbb9ea 7701
c14423fe
ET
7702 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7703 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7704
7705 might_sleep();
34f80b04 7706 while (cnt--) {
a2fbb9ea
ET
7707 if (poll) {
7708 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7709 /* if index is different from 0
7710 * the reply for some commands will
3101c2bc 7711 * be on the non default queue
a2fbb9ea
ET
7712 */
7713 if (idx)
7714 bnx2x_rx_int(&bp->fp[idx], 10);
7715 }
a2fbb9ea 7716
3101c2bc 7717 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7718 if (*state_p == state) {
7719#ifdef BNX2X_STOP_ON_ERROR
7720 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7721#endif
a2fbb9ea 7722 return 0;
8b3a0f0b 7723 }
a2fbb9ea 7724
a2fbb9ea 7725 msleep(1);
e3553b29
EG
7726
7727 if (bp->panic)
7728 return -EIO;
a2fbb9ea
ET
7729 }
7730
a2fbb9ea 7731 /* timeout! */
49d66772
ET
7732 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7733 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7734#ifdef BNX2X_STOP_ON_ERROR
7735 bnx2x_panic();
7736#endif
a2fbb9ea 7737
49d66772 7738 return -EBUSY;
a2fbb9ea
ET
7739}
7740
e665bfda
MC
7741static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7742{
7743 bp->set_mac_pending++;
7744 smp_wmb();
7745
7746 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7747 (1 << bp->fp->cl_id), BP_FUNC(bp));
7748
7749 /* Wait for a completion */
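	/* when clearing (set == 0) the completion is polled for, presumably
	   because the unload path may run with interrupts disabled */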
7750 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7751}
7752
7753static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7754{
7755 bp->set_mac_pending++;
7756 smp_wmb();
7757
7758 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7759 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7760 1);
7761
7762 /* Wait for a completion */
7763 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7764}
7765
993ac7b5
MC
7766#ifdef BCM_CNIC
7767/**
7768 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7769 * MAC(s). This function will wait until the ramrod completion
7770 * returns.
7771 *
7772 * @param bp driver handle
7773 * @param set set or clear the CAM entry
7774 *
7775 * @return 0 if success, -ENODEV if ramrod doesn't return.
7776 */
7777static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7778{
7779 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7780
7781 bp->set_mac_pending++;
7782 smp_wmb();
7783
7784 /* Send a SET_MAC ramrod */
7785 if (CHIP_IS_E1(bp))
7786 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7787 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7788 1);
7789 else
7790 /* CAM allocation for E1H
7791 * unicasts: by func number
7792 * multicast: 20+FUNC*20, 20 each
7793 */
7794 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7795 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7796
7797 /* Wait for a completion when setting */
7798 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7799
7800 return 0;
7801}
7802#endif
7803
a2fbb9ea
ET
7804static int bnx2x_setup_leading(struct bnx2x *bp)
7805{
34f80b04 7806 int rc;
a2fbb9ea 7807
c14423fe 7808 /* reset IGU state */
34f80b04 7809 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7810
7811 /* SETUP ramrod */
7812 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7813
34f80b04
EG
7814 /* Wait for completion */
7815 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7816
34f80b04 7817 return rc;
a2fbb9ea
ET
7818}
7819
7820static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7821{
555f6c78
EG
7822 struct bnx2x_fastpath *fp = &bp->fp[index];
7823
a2fbb9ea 7824 /* reset IGU state */
555f6c78 7825 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7826
228241eb 7827 /* SETUP ramrod */
555f6c78
EG
7828 fp->state = BNX2X_FP_STATE_OPENING;
7829 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7830 fp->cl_id, 0);
a2fbb9ea
ET
7831
7832 /* Wait for completion */
7833 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7834 &(fp->state), 0);
a2fbb9ea
ET
7835}
7836
a2fbb9ea 7837static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7838
54b9ddaa 7839static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 7840{
ca00392c
EG
7841
7842 switch (bp->multi_mode) {
7843 case ETH_RSS_MODE_DISABLED:
54b9ddaa 7844 bp->num_queues = 1;
ca00392c
EG
7845 break;
7846
7847 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
7848 if (num_queues)
7849 bp->num_queues = min_t(u32, num_queues,
7850 BNX2X_MAX_QUEUES(bp));
ca00392c 7851 else
54b9ddaa
VZ
7852 bp->num_queues = min_t(u32, num_online_cpus(),
7853 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
7854 break;
7855
7856
7857 default:
54b9ddaa 7858 bp->num_queues = 1;
ca00392c
EG
7859 break;
7860 }
ca00392c
EG
7861}
7862
54b9ddaa 7863static int bnx2x_set_num_queues(struct bnx2x *bp)
a2fbb9ea 7864{
ca00392c 7865 int rc = 0;
a2fbb9ea 7866
8badd27a
EG
7867 switch (int_mode) {
7868 case INT_MODE_INTx:
7869 case INT_MODE_MSI:
54b9ddaa 7870 bp->num_queues = 1;
ca00392c 7871 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a 7872 break;
8badd27a 7873 default:
54b9ddaa
VZ
7874 /* Set number of queues according to bp->multi_mode value */
7875 bnx2x_set_num_queues_msix(bp);
ca00392c 7876
54b9ddaa
VZ
7877 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7878 bp->num_queues);
ca00392c 7879
2dfe0e1f
EG
7880 /* if we can't use MSI-X we only need one fp,
7881 * so try to enable MSI-X with the requested number of fp's
7882	 * and fall back to MSI or legacy INTx with one fp
7883 */
ca00392c 7884 rc = bnx2x_enable_msix(bp);
54b9ddaa 7885 if (rc)
34f80b04 7886 /* failed to enable MSI-X */
54b9ddaa 7887 bp->num_queues = 1;
8badd27a 7888 break;
a2fbb9ea 7889 }
54b9ddaa 7890 bp->dev->real_num_tx_queues = bp->num_queues;
ca00392c 7891 return rc;
8badd27a
EG
7892}
7893
993ac7b5
MC
7894#ifdef BCM_CNIC
7895static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7896static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7897#endif
8badd27a
EG
7898
7899/* must be called with rtnl_lock */
7900static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7901{
7902 u32 load_code;
ca00392c
EG
7903 int i, rc;
7904
8badd27a 7905#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7906 if (unlikely(bp->panic))
7907 return -EPERM;
7908#endif
7909
7910 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7911
54b9ddaa 7912 rc = bnx2x_set_num_queues(bp);
c14423fe 7913
6cbe5065
VZ
7914 if (bnx2x_alloc_mem(bp)) {
7915 bnx2x_free_irq(bp, true);
a2fbb9ea 7916 return -ENOMEM;
6cbe5065 7917 }
a2fbb9ea 7918
54b9ddaa 7919 for_each_queue(bp, i)
7a9b2557
VZ
7920 bnx2x_fp(bp, i, disable_tpa) =
7921 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7922
54b9ddaa 7923 for_each_queue(bp, i)
2dfe0e1f
EG
7924 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7925 bnx2x_poll, 128);
7926
2dfe0e1f
EG
7927 bnx2x_napi_enable(bp);
7928
34f80b04
EG
7929 if (bp->flags & USING_MSIX_FLAG) {
7930 rc = bnx2x_req_msix_irqs(bp);
7931 if (rc) {
6cbe5065 7932 bnx2x_free_irq(bp, true);
2dfe0e1f 7933 goto load_error1;
34f80b04
EG
7934 }
7935 } else {
ca00392c 7936		/* Fall back to INTx if we failed to enable MSI-X due to lack of
54b9ddaa 7937 memory (in bnx2x_set_num_queues()) */
8badd27a
EG
7938 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7939 bnx2x_enable_msi(bp);
34f80b04
EG
7940 bnx2x_ack_int(bp);
7941 rc = bnx2x_req_irq(bp);
7942 if (rc) {
2dfe0e1f 7943 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6cbe5065 7944 bnx2x_free_irq(bp, true);
2dfe0e1f 7945 goto load_error1;
a2fbb9ea 7946 }
8badd27a
EG
7947 if (bp->flags & USING_MSI_FLAG) {
7948 bp->dev->irq = bp->pdev->irq;
7995c64e
JP
7949 netdev_info(bp->dev, "using MSI IRQ %d\n",
7950 bp->pdev->irq);
8badd27a 7951 }
a2fbb9ea
ET
7952 }
7953
2dfe0e1f
EG
7954 /* Send LOAD_REQUEST command to MCP
7955 Returns the type of LOAD command:
7956 if it is the first port to be initialized
7957 common blocks should be initialized, otherwise - not
7958 */
7959 if (!BP_NOMCP(bp)) {
7960 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7961 if (!load_code) {
7962 BNX2X_ERR("MCP response failure, aborting\n");
7963 rc = -EBUSY;
7964 goto load_error2;
7965 }
7966 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7967 rc = -EBUSY; /* other port in diagnostic mode */
7968 goto load_error2;
7969 }
7970
7971 } else {
7972 int port = BP_PORT(bp);
7973
f5372251 7974 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7975 load_count[0], load_count[1], load_count[2]);
7976 load_count[0]++;
7977 load_count[1 + port]++;
f5372251 7978 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7979 load_count[0], load_count[1], load_count[2]);
7980 if (load_count[0] == 1)
7981 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7982 else if (load_count[1 + port] == 1)
7983 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7984 else
7985 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7986 }
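	/* i.e. without an MCP: the very first load does COMMON init, the
	   first load on each port does PORT init, later loads FUNCTION only */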
7987
7988 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7989 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7990 bp->port.pmf = 1;
7991 else
7992 bp->port.pmf = 0;
7993 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7994
a2fbb9ea 7995 /* Initialize HW */
34f80b04
EG
7996 rc = bnx2x_init_hw(bp, load_code);
7997 if (rc) {
a2fbb9ea 7998 BNX2X_ERR("HW init failed, aborting\n");
f1e1a199
VZ
7999 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8000 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8001 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
2dfe0e1f 8002 goto load_error2;
a2fbb9ea
ET
8003 }
8004
a2fbb9ea 8005 /* Setup NIC internals and enable interrupts */
471de716 8006 bnx2x_nic_init(bp, load_code);
a2fbb9ea 8007
2691d51d
EG
8008 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8009 (bp->common.shmem2_base))
8010 SHMEM2_WR(bp, dcc_support,
8011 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8012 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8013
a2fbb9ea 8014 /* Send LOAD_DONE command to MCP */
34f80b04 8015 if (!BP_NOMCP(bp)) {
228241eb
ET
8016 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8017 if (!load_code) {
da5a662a 8018 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 8019 rc = -EBUSY;
2dfe0e1f 8020 goto load_error3;
a2fbb9ea
ET
8021 }
8022 }
8023
8024 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8025
34f80b04
EG
8026 rc = bnx2x_setup_leading(bp);
8027 if (rc) {
da5a662a 8028 BNX2X_ERR("Setup leading failed!\n");
e3553b29 8029#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 8030 goto load_error3;
e3553b29
EG
8031#else
8032 bp->panic = 1;
8033 return -EBUSY;
8034#endif
34f80b04 8035 }
a2fbb9ea 8036
34f80b04
EG
8037 if (CHIP_IS_E1H(bp))
8038 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 8039 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 8040 bp->flags |= MF_FUNC_DIS;
34f80b04 8041 }
a2fbb9ea 8042
ca00392c 8043 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
8044#ifdef BCM_CNIC
8045 /* Enable Timer scan */
8046 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8047#endif
34f80b04
EG
8048 for_each_nondefault_queue(bp, i) {
8049 rc = bnx2x_setup_multi(bp, i);
8050 if (rc)
37b091ba
MC
8051#ifdef BCM_CNIC
8052 goto load_error4;
8053#else
2dfe0e1f 8054 goto load_error3;
37b091ba 8055#endif
34f80b04 8056 }
a2fbb9ea 8057
ca00392c 8058 if (CHIP_IS_E1(bp))
e665bfda 8059 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 8060 else
e665bfda 8061 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
8062#ifdef BCM_CNIC
8063 /* Set iSCSI L2 MAC */
8064 mutex_lock(&bp->cnic_mutex);
8065 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8066 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8067 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
4a6e47a4
MC
8068 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8069 CNIC_SB_ID(bp));
993ac7b5
MC
8070 }
8071 mutex_unlock(&bp->cnic_mutex);
8072#endif
ca00392c 8073 }
34f80b04
EG
8074
8075 if (bp->port.pmf)
b5bf9068 8076 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
8077
8078 /* Start fast path */
34f80b04
EG
8079 switch (load_mode) {
8080 case LOAD_NORMAL:
ca00392c
EG
8081 if (bp->state == BNX2X_STATE_OPEN) {
8082			/* Tx queues should only be re-enabled */
8083 netif_tx_wake_all_queues(bp->dev);
8084 }
2dfe0e1f 8085 /* Initialize the receive filter. */
34f80b04
EG
8086 bnx2x_set_rx_mode(bp->dev);
8087 break;
8088
8089 case LOAD_OPEN:
555f6c78 8090 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
8091 if (bp->state != BNX2X_STATE_OPEN)
8092 netif_tx_disable(bp->dev);
2dfe0e1f 8093 /* Initialize the receive filter. */
34f80b04 8094 bnx2x_set_rx_mode(bp->dev);
34f80b04 8095 break;
a2fbb9ea 8096
34f80b04 8097 case LOAD_DIAG:
2dfe0e1f 8098 /* Initialize the receive filter. */
a2fbb9ea 8099 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
8100 bp->state = BNX2X_STATE_DIAG;
8101 break;
8102
8103 default:
8104 break;
a2fbb9ea
ET
8105 }
8106
34f80b04
EG
8107 if (!bp->port.pmf)
8108 bnx2x__link_status_update(bp);
8109
a2fbb9ea
ET
8110 /* start the timer */
8111 mod_timer(&bp->timer, jiffies + bp->current_interval);
8112
993ac7b5
MC
8113#ifdef BCM_CNIC
8114 bnx2x_setup_cnic_irq_info(bp);
8115 if (bp->state == BNX2X_STATE_OPEN)
8116 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8117#endif
72fd0718 8118 bnx2x_inc_load_cnt(bp);
34f80b04 8119
a2fbb9ea
ET
8120 return 0;
8121
37b091ba
MC
8122#ifdef BCM_CNIC
8123load_error4:
8124 /* Disable Timer scan */
8125 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8126#endif
2dfe0e1f
EG
8127load_error3:
8128 bnx2x_int_disable_sync(bp, 1);
8129 if (!BP_NOMCP(bp)) {
8130 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8131 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8132 }
8133 bp->port.pmf = 0;
7a9b2557
VZ
8134 /* Free SKBs, SGEs, TPA pool and driver internals */
8135 bnx2x_free_skbs(bp);
54b9ddaa 8136 for_each_queue(bp, i)
3196a88a 8137 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 8138load_error2:
d1014634 8139 /* Release IRQs */
6cbe5065 8140 bnx2x_free_irq(bp, false);
2dfe0e1f
EG
8141load_error1:
8142 bnx2x_napi_disable(bp);
54b9ddaa 8143 for_each_queue(bp, i)
7cde1c8b 8144 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8145 bnx2x_free_mem(bp);
8146
34f80b04 8147 return rc;
a2fbb9ea
ET
8148}
8149
8150static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8151{
555f6c78 8152 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
8153 int rc;
8154
c14423fe 8155 /* halt the connection */
555f6c78
EG
8156 fp->state = BNX2X_FP_STATE_HALTING;
8157 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 8158
34f80b04 8159 /* Wait for completion */
a2fbb9ea 8160 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 8161 &(fp->state), 1);
c14423fe 8162 if (rc) /* timeout */
a2fbb9ea
ET
8163 return rc;
8164
8165 /* delete cfc entry */
8166 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8167
34f80b04
EG
8168 /* Wait for completion */
8169 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 8170 &(fp->state), 1);
34f80b04 8171 return rc;
a2fbb9ea
ET
8172}
8173
da5a662a 8174static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 8175{
4781bfad 8176 __le16 dsb_sp_prod_idx;
c14423fe 8177 /* if the other port is handling traffic,
a2fbb9ea 8178 this can take a lot of time */
34f80b04
EG
8179 int cnt = 500;
8180 int rc;
a2fbb9ea
ET
8181
8182 might_sleep();
8183
8184 /* Send HALT ramrod */
8185 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 8186 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 8187
34f80b04
EG
8188 /* Wait for completion */
8189 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8190 &(bp->fp[0].state), 1);
8191 if (rc) /* timeout */
da5a662a 8192 return rc;
a2fbb9ea 8193
49d66772 8194 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 8195
228241eb 8196 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
8197 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8198
49d66772 8199	/* Wait for completion to arrive on default status block;
a2fbb9ea
ET
8200	   we are going to reset the chip anyway,
8201	   so there is not much to do if this times out
8202 */
34f80b04 8203 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
8204 if (!cnt) {
8205 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8206 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8207 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8208#ifdef BNX2X_STOP_ON_ERROR
8209 bnx2x_panic();
8210#endif
36e552ab 8211 rc = -EBUSY;
34f80b04
EG
8212 break;
8213 }
8214 cnt--;
da5a662a 8215 msleep(1);
5650d9d4 8216 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
8217 }
8218 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8219 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
8220
8221 return rc;
a2fbb9ea
ET
8222}
8223
34f80b04
EG
8224static void bnx2x_reset_func(struct bnx2x *bp)
8225{
8226 int port = BP_PORT(bp);
8227 int func = BP_FUNC(bp);
8228 int base, i;
8229
8230 /* Configure IGU */
8231 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8232 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8233
37b091ba
MC
8234#ifdef BCM_CNIC
8235 /* Disable Timer scan */
8236 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8237 /*
8238	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
8239 * complete
8240 */
8241 for (i = 0; i < 200; i++) {
8242 msleep(10);
8243 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8244 break;
8245 }
8246#endif
34f80b04
EG
8247 /* Clear ILT */
8248 base = FUNC_ILT_BASE(func);
8249 for (i = base; i < base + ILT_PER_FUNC; i++)
8250 bnx2x_ilt_wr(bp, i, 0);
8251}
8252
8253static void bnx2x_reset_port(struct bnx2x *bp)
8254{
8255 int port = BP_PORT(bp);
8256 u32 val;
8257
8258 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8259
8260 /* Do not rcv packets to BRB */
8261 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8262 /* Do not direct rcv packets that are not for MCP to the BRB */
8263 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8264 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8265
8266 /* Configure AEU */
8267 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8268
8269 msleep(100);
8270 /* Check for BRB port occupancy */
8271 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8272 if (val)
8273 DP(NETIF_MSG_IFDOWN,
33471629 8274 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
8275
8276 /* TODO: Close Doorbell port? */
8277}
8278
34f80b04
EG
8279static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8280{
8281 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8282 BP_FUNC(bp), reset_code);
8283
8284 switch (reset_code) {
8285 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8286 bnx2x_reset_port(bp);
8287 bnx2x_reset_func(bp);
8288 bnx2x_reset_common(bp);
8289 break;
8290
8291 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8292 bnx2x_reset_port(bp);
8293 bnx2x_reset_func(bp);
8294 break;
8295
8296 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8297 bnx2x_reset_func(bp);
8298 break;
49d66772 8299
34f80b04
EG
8300 default:
8301 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8302 break;
8303 }
8304}
8305
72fd0718 8306static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 8307{
da5a662a 8308 int port = BP_PORT(bp);
a2fbb9ea 8309 u32 reset_code = 0;
da5a662a 8310 int i, cnt, rc;
a2fbb9ea 8311
555f6c78 8312 /* Wait until tx fastpath tasks complete */
54b9ddaa 8313 for_each_queue(bp, i) {
228241eb
ET
8314 struct bnx2x_fastpath *fp = &bp->fp[i];
8315
34f80b04 8316 cnt = 1000;
e8b5fc51 8317 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 8318
7961f791 8319 bnx2x_tx_int(fp);
34f80b04
EG
8320 if (!cnt) {
8321 BNX2X_ERR("timeout waiting for queue[%d]\n",
8322 i);
8323#ifdef BNX2X_STOP_ON_ERROR
8324 bnx2x_panic();
8325 return -EBUSY;
8326#else
8327 break;
8328#endif
8329 }
8330 cnt--;
da5a662a 8331 msleep(1);
34f80b04 8332 }
228241eb 8333 }
da5a662a
VZ
8334 /* Give HW time to discard old tx messages */
8335 msleep(1);
a2fbb9ea 8336
3101c2bc
YG
8337 if (CHIP_IS_E1(bp)) {
8338 struct mac_configuration_cmd *config =
8339 bnx2x_sp(bp, mcast_config);
8340
e665bfda 8341 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 8342
8d9c5f34 8343 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
8344 CAM_INVALIDATE(config->config_table[i]);
8345
8d9c5f34 8346 config->hdr.length = i;
3101c2bc
YG
8347 if (CHIP_REV_IS_SLOW(bp))
8348 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8349 else
8350 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 8351 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
8352 config->hdr.reserved1 = 0;
8353
e665bfda
MC
8354 bp->set_mac_pending++;
8355 smp_wmb();
8356
3101c2bc
YG
8357 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8358 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8359 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8360
8361 } else { /* E1H */
65abd74d
YG
8362 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8363
e665bfda 8364 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
8365
8366 for (i = 0; i < MC_HASH_SIZE; i++)
8367 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
8368
8369 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 8370 }
993ac7b5
MC
8371#ifdef BCM_CNIC
8372 /* Clear iSCSI L2 MAC */
8373 mutex_lock(&bp->cnic_mutex);
8374 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8375 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8376 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8377 }
8378 mutex_unlock(&bp->cnic_mutex);
8379#endif
3101c2bc 8380
65abd74d
YG
8381 if (unload_mode == UNLOAD_NORMAL)
8382 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8383
7d0446c2 8384 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8385 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8386
7d0446c2 8387 else if (bp->wol) {
65abd74d
YG
8388 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8389 u8 *mac_addr = bp->dev->dev_addr;
8390 u32 val;
8391 /* The mac address is written to entries 1-4 to
8392 preserve entry 0 which is used by the PMF */
8393 u8 entry = (BP_E1HVN(bp) + 1)*8;
8394
8395 val = (mac_addr[0] << 8) | mac_addr[1];
8396 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8397
8398 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8399 (mac_addr[4] << 8) | mac_addr[5];
8400 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8401
8402 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8403
8404 } else
8405 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8406
34f80b04
EG
8407 /* Close multi and leading connections.
8408 Completions for ramrods are collected synchronously */
a2fbb9ea
ET
8409 for_each_nondefault_queue(bp, i)
8410 if (bnx2x_stop_multi(bp, i))
228241eb 8411 goto unload_error;
a2fbb9ea 8412
da5a662a
VZ
8413 rc = bnx2x_stop_leading(bp);
8414 if (rc) {
34f80b04 8415 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8416#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8417 return -EBUSY;
da5a662a
VZ
8418#else
8419 goto unload_error;
34f80b04 8420#endif
228241eb
ET
8421 }
8422
8423unload_error:
34f80b04 8424 if (!BP_NOMCP(bp))
228241eb 8425 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8426 else {
f5372251 8427 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8428 load_count[0], load_count[1], load_count[2]);
8429 load_count[0]--;
da5a662a 8430 load_count[1 + port]--;
f5372251 8431 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8432 load_count[0], load_count[1], load_count[2]);
8433 if (load_count[0] == 0)
8434 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8435 else if (load_count[1 + port] == 0)
34f80b04
EG
8436 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8437 else
8438 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8439 }
a2fbb9ea 8440
34f80b04
EG
8441 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8442 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8443 bnx2x__link_reset(bp);
a2fbb9ea
ET
8444
8445 /* Reset the chip */
228241eb 8446 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8447
8448 /* Report UNLOAD_DONE to MCP */
34f80b04 8449 if (!BP_NOMCP(bp))
a2fbb9ea 8450 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8451
72fd0718
VZ
8452}
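
The WoL branch above packs the 6-byte station address into the two 32-bit EMAC MAC-match words, skipping match entry 0 (8 bytes per entry) because the PMF owns it. A standalone sketch of the packing, outside the driver:

#include <stdint.h>

/* Pack a MAC address the way the EMAC_REG_EMAC_MAC_MATCH writes above
 * do: bytes 0-1 in the low half of the first word, bytes 2-5 in the
 * second word, most significant byte first. */
static void emac_mac_match_pack(const uint8_t mac[6], uint32_t out[2])
{
        out[0] = ((uint32_t)mac[0] << 8) | mac[1];
        out[1] = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
                 ((uint32_t)mac[4] << 8) | mac[5];
}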
8453
8454static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8455{
8456 u32 val;
8457
8458 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8459
8460 if (CHIP_IS_E1(bp)) {
8461 int port = BP_PORT(bp);
8462 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8463 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8464
8465 val = REG_RD(bp, addr);
8466 val &= ~(0x300);
8467 REG_WR(bp, addr, val);
8468 } else if (CHIP_IS_E1H(bp)) {
8469 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8470 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8471 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8472 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8473 }
8474}
8475
8476/* must be called with rtnl_lock */
8477static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8478{
8479 int i;
8480
8481 if (bp->state == BNX2X_STATE_CLOSED) {
8482 /* Interface has been removed - nothing to recover */
8483 bp->recovery_state = BNX2X_RECOVERY_DONE;
8484 bp->is_leader = 0;
8485 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8486 smp_wmb();
8487
8488 return -EINVAL;
8489 }
8490
8491#ifdef BCM_CNIC
8492 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8493#endif
8494 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8495
8496 /* Set "drop all" */
8497 bp->rx_mode = BNX2X_RX_MODE_NONE;
8498 bnx2x_set_storm_rx_mode(bp);
8499
8500 /* Disable HW interrupts, NAPI and Tx */
8501 bnx2x_netif_stop(bp, 1);
c89af1a3 8502 netif_carrier_off(bp->dev);
72fd0718
VZ
8503
8504 del_timer_sync(&bp->timer);
8505 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8506 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8507 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8508
8509 /* Release IRQs */
8510 bnx2x_free_irq(bp, false);
8511
8512 /* Cleanup the chip if needed */
8513 if (unload_mode != UNLOAD_RECOVERY)
8514 bnx2x_chip_cleanup(bp, unload_mode);
8515
9a035440 8516 bp->port.pmf = 0;
a2fbb9ea 8517
7a9b2557 8518 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8519 bnx2x_free_skbs(bp);
54b9ddaa 8520 for_each_queue(bp, i)
3196a88a 8521 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8522 for_each_queue(bp, i)
7cde1c8b 8523 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8524 bnx2x_free_mem(bp);
8525
8526 bp->state = BNX2X_STATE_CLOSED;
228241eb 8527
72fd0718
VZ
8528 /* The last driver must disable the "close the gate" functionality if
8529 * there is no parity attention or "process kill" pending.
8530 */
8531 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8532 bnx2x_reset_is_done(bp))
8533 bnx2x_disable_close_the_gate(bp);
8534
8535 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8536 if (unload_mode == UNLOAD_RECOVERY)
8537 bp->fw_seq = 0;
8538
8539 return 0;
8540}
8541
8542/* Close gates #2, #3 and #4: */
8543static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8544{
8545 u32 val, addr;
8546
8547 /* Gates #2 and #4a are closed/opened for "not E1" only */
8548 if (!CHIP_IS_E1(bp)) {
8549 /* #4 */
8550 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8551 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8552 close ? (val | 0x1) : (val & (~(u32)1)));
8553 /* #2 */
8554 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8555 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8556 close ? (val | 0x1) : (val & (~(u32)1)));
8557 }
8558
8559 /* #3 */
8560 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8561 val = REG_RD(bp, addr);
8562 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8563
8564 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8565 close ? "closing" : "opening");
8566 mmiowb();
8567}
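
Each gate above is toggled with the same read-modify-write of bit 0; note that gate #3 has the opposite sense from this function's point of view, hence the "(!close)". A minimal sketch of the pattern with stand-in register accessors:

#include <stdint.h>

extern uint32_t reg_rd(uint32_t addr);          /* stand-in for REG_RD */
extern void reg_wr(uint32_t addr, uint32_t val);/* stand-in for REG_WR */

/* Flip only bit 0 of a register, preserving all other bits. */
static void set_bit0(uint32_t addr, int on)
{
        uint32_t val = reg_rd(addr);

        reg_wr(addr, on ? (val | 0x1) : (val & ~(uint32_t)0x1));
}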
8568
8569#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8570
8571static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8572{
8573 /* Do some magic... */
8574 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8575 *magic_val = val & SHARED_MF_CLP_MAGIC;
8576 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8577}
8578
8579/* Restore the value of the `magic' bit.
8580 *
8581 * @param bp Driver handle.
8582 * @param magic_val Old value of the `magic' bit.
8583 */
8584static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8585{
8586 /* Restore the `magic' bit value... */
8590 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8591 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8592 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8593}
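
The two CLP helpers implement a save/force/restore idiom for a single flag bit: prep() records the old state and forces the bit on, done() merges the saved state back without disturbing the rest of the word. A self-contained sketch, with a plain variable standing in for the MF_CFG_RD/MF_CFG_WR accesses:

#include <stdint.h>

#define MAGIC 0x80000000u

static uint32_t clp_mb;         /* stands in for shared_mf_config.clp_mb */

static void magic_prep(uint32_t *saved)
{
        *saved = clp_mb & MAGIC;        /* remember the old bit */
        clp_mb |= MAGIC;                /* force it on */
}

static void magic_done(uint32_t saved)
{
        clp_mb = (clp_mb & ~MAGIC) | saved; /* restore only that bit */
}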
8594
8595/* Prepares for MCP reset: takes care of CLP configurations.
8596 *
8597 * @param bp
8598 * @param magic_val Old value of 'magic' bit.
8599 */
8600static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8601{
8602 u32 shmem;
8603 u32 validity_offset;
8604
8605 DP(NETIF_MSG_HW, "Starting\n");
8606
8607 /* Set `magic' bit in order to save MF config */
8608 if (!CHIP_IS_E1(bp))
8609 bnx2x_clp_reset_prep(bp, magic_val);
8610
8611 /* Get shmem offset */
8612 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8613 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8614
8615 /* Clear validity map flags */
8616 if (shmem > 0)
8617 REG_WR(bp, shmem + validity_offset, 0);
8618}
8619
8620#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8621#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8622
8623/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8624 * depending on the HW type.
8625 *
8626 * @param bp
8627 */
8628static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8629{
8630 /* special handling for emulation and FPGA,
8631 wait 10 times longer */
8632 if (CHIP_REV_IS_SLOW(bp))
8633 msleep(MCP_ONE_TIMEOUT*10);
8634 else
8635 msleep(MCP_ONE_TIMEOUT);
8636}
8637
8638static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8639{
8640 u32 shmem, cnt, validity_offset, val;
8641 int rc = 0;
8642
8643 msleep(100);
8644
8645 /* Get shmem offset */
8646 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8647 if (shmem == 0) {
8648 BNX2X_ERR("Shmem base address is 0 - MCP is not up\n");
8649 rc = -ENOTTY;
8650 goto exit_lbl;
8651 }
8652
8653 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8654
8655 /* Wait for MCP to come up */
8656 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8657 /* TBD: it's best to check the validity map of the last port;
8658 * currently this checks port 0.
8659 */
8660 val = REG_RD(bp, shmem + validity_offset);
8661 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8662 shmem + validity_offset, val);
8663
8664 /* check that shared memory is valid. */
8665 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8666 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8667 break;
8668
8669 bnx2x_mcp_wait_one(bp);
8670 }
8671
8672 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8673
8674 /* Check that shared memory is valid. This indicates that MCP is up. */
8675 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8676 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8677 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8678 rc = -ENOTTY;
8679 goto exit_lbl;
8680 }
8681
8682exit_lbl:
8683 /* Restore the `magic' bit value */
8684 if (!CHIP_IS_E1(bp))
8685 bnx2x_clp_reset_done(bp, magic_val);
8686
8687 return rc;
8688}
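
bnx2x_reset_mcp_comp() is a bounded polling loop: check the shmem validity flags, sleep one step, and give up after MCP_TIMEOUT/MCP_ONE_TIMEOUT tries. The shape, reduced to a standalone sketch with stand-in msleep()/shmem_valid() helpers:

#include <stdbool.h>

extern void msleep(unsigned int ms);    /* stand-in */
extern bool shmem_valid(void);          /* stand-in for the validity check */

/* Poll until the condition holds or total_ms elapses. */
static int wait_for_mcp(unsigned int total_ms, unsigned int step_ms)
{
        unsigned int cnt;

        for (cnt = 0; cnt < total_ms / step_ms; cnt++) {
                if (shmem_valid())
                        return 0;       /* MCP is up */
                msleep(step_ms);
        }
        return -1;                      /* timed out */
}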
8689
8690static void bnx2x_pxp_prep(struct bnx2x *bp)
8691{
8692 if (!CHIP_IS_E1(bp)) {
8693 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8694 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8695 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8696 mmiowb();
8697 }
8698}
8699
8700/*
8701 * Reset the whole chip except for:
8702 * - PCIE core
8703 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8704 * one reset bit)
8705 * - IGU
8706 * - MISC (including AEU)
8707 * - GRC
8708 * - RBCN, RBCP
8709 */
8710static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8711{
8712 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8713
8714 not_reset_mask1 =
8715 MISC_REGISTERS_RESET_REG_1_RST_HC |
8716 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8717 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8718
8719 not_reset_mask2 =
8720 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8721 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8722 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8723 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8724 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8725 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8726 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8727 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8728
8729 reset_mask1 = 0xffffffff;
8730
8731 if (CHIP_IS_E1(bp))
8732 reset_mask2 = 0xffff;
8733 else
8734 reset_mask2 = 0x1ffff;
8735
8736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8737 reset_mask1 & (~not_reset_mask1));
8738 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8739 reset_mask2 & (~not_reset_mask2));
8740
8741 barrier();
8742 mmiowb();
8743
8744 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8745 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8746 mmiowb();
8747}
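
The masks above follow a "reset everything except..." scheme: start from an all-ones mask and strip the blocks that must survive, then write the result to the _CLEAR register (which, as the sequence suggests, asserts the block resets) before re-asserting the full mask through _SET. The arithmetic in isolation:

#include <stdint.h>

/* Everything in reset_mask except the blocks named in not_reset,
 * e.g. blocks_to_reset(0xffffffff, HC | PXPV | PXP bits). */
static uint32_t blocks_to_reset(uint32_t reset_mask, uint32_t not_reset)
{
        return reset_mask & ~not_reset;
}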
8748
8749static int bnx2x_process_kill(struct bnx2x *bp)
8750{
8751 int cnt = 1000;
8752 u32 val = 0;
8753 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8754
8755
8756 /* Empty the Tetris buffer, wait for 1s */
8757 do {
8758 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8759 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8760 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8761 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8762 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8763 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8764 ((port_is_idle_0 & 0x1) == 0x1) &&
8765 ((port_is_idle_1 & 0x1) == 0x1) &&
8766 (pgl_exp_rom2 == 0xffffffff))
8767 break;
8768 msleep(1);
8769 } while (cnt-- > 0);
8770
8771 if (cnt <= 0) {
8772 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there are still"
8773 " outstanding read requests after 1s!\n");
8775 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8776 " port_is_idle_0=0x%08x,"
8777 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8778 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8779 pgl_exp_rom2);
8780 return -EAGAIN;
8781 }
8782
8783 barrier();
8784
8785 /* Close gates #2, #3 and #4 */
8786 bnx2x_set_234_gates(bp, true);
8787
8788 /* TBD: Indicate that "process kill" is in progress to MCP */
8789
8790 /* Clear "unprepared" bit */
8791 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8792 barrier();
8793
8794 /* Make sure all is written to the chip before the reset */
8795 mmiowb();
8796
8797 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8798 * PSWHST, GRC and PSWRD Tetris buffer.
8799 */
8800 msleep(1);
8801
8802 /* Prepare to chip reset: */
8803 /* MCP */
8804 bnx2x_reset_mcp_prep(bp, &val);
8805
8806 /* PXP */
8807 bnx2x_pxp_prep(bp);
8808 barrier();
8809
8810 /* reset the chip */
8811 bnx2x_process_kill_chip_reset(bp);
8812 barrier();
8813
8814 /* Recover after reset: */
8815 /* MCP */
8816 if (bnx2x_reset_mcp_comp(bp, val))
8817 return -EAGAIN;
8818
8819 /* PXP */
8820 bnx2x_pxp_prep(bp);
8821
8822 /* Open the gates #2, #3 and #4 */
8823 bnx2x_set_234_gates(bp, false);
8824
8825 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8826 * reset state, re-enable attentions. */
8827
a2fbb9ea
ET
8828 return 0;
8829}
8830
72fd0718
VZ
8831static int bnx2x_leader_reset(struct bnx2x *bp)
8832{
8833 int rc = 0;
8834 /* Try to recover after the failure */
8835 if (bnx2x_process_kill(bp)) {
8836 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8837 bp->dev->name);
8838 rc = -EAGAIN;
8839 goto exit_leader_reset;
8840 }
8841
8842 /* Clear "reset is in progress" bit and update the driver state */
8843 bnx2x_set_reset_done(bp);
8844 bp->recovery_state = BNX2X_RECOVERY_DONE;
8845
8846exit_leader_reset:
8847 bp->is_leader = 0;
8848 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8849 smp_wmb();
8850 return rc;
8851}
8852
8853static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8854
8855/* Assumption: runs under rtnl lock. This together with the fact
8856 * that it's called only from bnx2x_reset_task() ensure that it
8857 * will never be called when netif_running(bp->dev) is false.
8858 */
8859static void bnx2x_parity_recover(struct bnx2x *bp)
8860{
8861 DP(NETIF_MSG_HW, "Handling parity\n");
8862 while (1) {
8863 switch (bp->recovery_state) {
8864 case BNX2X_RECOVERY_INIT:
8865 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8866 /* Try to get a LEADER_LOCK HW lock */
8867 if (bnx2x_trylock_hw_lock(bp,
8868 HW_LOCK_RESOURCE_RESERVED_08))
8869 bp->is_leader = 1;
8870
8871 /* Stop the driver */
8872 /* If interface has been removed - break */
8873 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8874 return;
8875
8876 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8877 /* Ensure "is_leader" and "recovery_state"
8878 * update values are seen on other CPUs
8879 */
8880 smp_wmb();
8881 break;
8882
8883 case BNX2X_RECOVERY_WAIT:
8884 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8885 if (bp->is_leader) {
8886 u32 load_counter = bnx2x_get_load_cnt(bp);
8887 if (load_counter) {
8888 /* Wait until all other functions go
8889 * down.
8890 */
8891 schedule_delayed_work(&bp->reset_task,
8892 HZ/10);
8893 return;
8894 } else {
8895 /* If all other functions went down -
8896 * try to bring the chip back to
8897 * normal. In any case it's an exit
8898 * point for a leader.
8899 */
8900 if (bnx2x_leader_reset(bp) ||
8901 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8902 printk(KERN_ERR"%s: Recovery "
8903 "has failed. Power cycle is "
8904 "needed.\n", bp->dev->name);
8905 /* Disconnect this device */
8906 netif_device_detach(bp->dev);
8907 /* Block ifup for all function
8908 * of this ASIC until
8909 * "process kill" or power
8910 * cycle.
8911 */
8912 bnx2x_set_reset_in_progress(bp);
8913 /* Shut down the power */
8914 bnx2x_set_power_state(bp,
8915 PCI_D3hot);
8916 return;
8917 }
8918
8919 return;
8920 }
8921 } else { /* non-leader */
8922 if (!bnx2x_reset_is_done(bp)) {
8923 /* Try to get the LEADER_LOCK HW lock,
8924 * since a former leader may have
8925 * been unloaded by the user or may
8926 * have released leadership for
8927 * another reason.
8928 */
8929 if (bnx2x_trylock_hw_lock(bp,
8930 HW_LOCK_RESOURCE_RESERVED_08)) {
8931 /* I'm a leader now! Restart a
8932 * switch case.
8933 */
8934 bp->is_leader = 1;
8935 break;
8936 }
8937
8938 schedule_delayed_work(&bp->reset_task,
8939 HZ/10);
8940 return;
8941
8942 } else { /* A leader has completed
8943 * the "process kill". It's an exit
8944 * point for a non-leader.
8945 */
8946 bnx2x_nic_load(bp, LOAD_NORMAL);
8947 bp->recovery_state =
8948 BNX2X_RECOVERY_DONE;
8949 smp_wmb();
8950 return;
8951 }
8952 }
8953 default:
8954 return;
8955 }
8956 }
8957}
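
bnx2x_parity_recover() is easier to follow as a two-state flow: INIT elects a leader via the shared HW lock and unloads, WAIT either drives the global reset (leader, once every other function is down) or waits for the leader to finish (non-leader), and both sides reload on exit. A reduced step-function sketch with hypothetical helpers; the real code re-queues itself with schedule_delayed_work() instead of being called in a loop, and the non-leader's attempt to take over a vacated leadership is omitted here:

#include <stdbool.h>

enum rec_state { REC_INIT, REC_WAIT, REC_DONE };

extern bool trylock_leader(void);       /* stand-in for the HW lock */
extern bool others_still_loaded(void);  /* stand-in for the load count */
extern bool global_reset_done(void);
extern void unload(void), global_reset(void), reload(void);

static enum rec_state recover_step(enum rec_state s, bool *leader)
{
        switch (s) {
        case REC_INIT:
                *leader = trylock_leader();
                unload();
                return REC_WAIT;
        case REC_WAIT:
                if (*leader) {
                        if (others_still_loaded())
                                return REC_WAIT;        /* re-check later */
                        global_reset();
                } else if (!global_reset_done()) {
                        return REC_WAIT;                /* wait for leader */
                }
                reload();
                return REC_DONE;
        default:
                return REC_DONE;
        }
}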
8958
8959 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8960 * scheduled on a general queue in order to prevent a deadlock.
8961 */
34f80b04
EG
8962static void bnx2x_reset_task(struct work_struct *work)
8963{
72fd0718 8964 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
8965
8966#ifdef BNX2X_STOP_ON_ERROR
8967 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8968 " so reset not done to allow debug dump,\n"
72fd0718 8969 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
8970 return;
8971#endif
8972
8973 rtnl_lock();
8974
8975 if (!netif_running(bp->dev))
8976 goto reset_task_exit;
8977
72fd0718
VZ
8978 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8979 bnx2x_parity_recover(bp);
8980 else {
8981 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8982 bnx2x_nic_load(bp, LOAD_NORMAL);
8983 }
34f80b04
EG
8984
8985reset_task_exit:
8986 rtnl_unlock();
8987}
8988
a2fbb9ea
ET
8989/* end of nic load/unload */
8990
8991/* ethtool_ops */
8992
8993/*
8994 * Init service functions
8995 */
8996
f1ef27ef
EG
8997static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8998{
8999 switch (func) {
9000 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9001 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9002 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9003 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9004 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9005 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9006 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9007 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9008 default:
9009 BNX2X_ERR("Unsupported function index: %d\n", func);
9010 return (u32)(-1);
9011 }
9012}
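
bnx2x_get_pretend_reg() maps a dense 0..7 function index to one of eight per-function registers; the same mapping written as a table lookup (the slots would hold the PXP2_REG_PGL_PRETEND_FUNC_F0..F7 constants, omitted here):

#include <stdint.h>

static const uint32_t pretend_regs[8] = { 0 /* fill in F0..F7 */ };

static uint32_t pretend_reg(int func)
{
        if (func < 0 || func > 7)
                return (uint32_t)-1;    /* unsupported function index */
        return pretend_regs[func];
}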
9013
9014static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9015{
9016 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9017
9018 /* Flush all outstanding writes */
9019 mmiowb();
9020
9021 /* Pretend to be function 0 */
9022 REG_WR(bp, reg, 0);
9023 /* Flush the GRC transaction (in the chip) */
9024 new_val = REG_RD(bp, reg);
9025 if (new_val != 0) {
9026 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9027 new_val);
9028 BUG();
9029 }
9030
9031 /* From now we are in the "like-E1" mode */
9032 bnx2x_int_disable(bp);
9033
9034 /* Flush all outstanding writes */
9035 mmiowb();
9036
9037 /* Restore the original function settings */
9038 REG_WR(bp, reg, orig_func);
9039 new_val = REG_RD(bp, reg);
9040 if (new_val != orig_func) {
9041 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9042 orig_func, new_val);
9043 BUG();
9044 }
9045}
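
bnx2x_undi_int_disable_e1h() never trusts a bare GRC write: each pretend-register write is read back, both to flush the posted transaction and to confirm the value latched (it BUGs otherwise). The idiom in isolation, with stand-in accessors:

#include <stdint.h>

extern uint32_t reg_rd(uint32_t addr);  /* stand-in for REG_RD */
extern void reg_wr(uint32_t addr, uint32_t val);

/* Write a register and verify it latched; returns 0 on success. */
static int write_verified(uint32_t addr, uint32_t val)
{
        reg_wr(addr, val);
        return (reg_rd(addr) == val) ? 0 : -1;
}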
9046
9047static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9048{
9049 if (CHIP_IS_E1H(bp))
9050 bnx2x_undi_int_disable_e1h(bp, func);
9051 else
9052 bnx2x_int_disable(bp);
9053}
9054
34f80b04
EG
9055static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9056{
9057 u32 val;
9058
9059 /* Check if there is any driver already loaded */
9060 val = REG_RD(bp, MISC_REG_UNPREPARED);
9061 if (val == 0x1) {
9062 /* Check if it is the UNDI driver.
9063 * The UNDI driver initializes the CID offset for the normal doorbell to 0x7
9064 */
4a37fb66 9065 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9066 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9067 if (val == 0x7) {
9068 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 9069 /* save our func */
34f80b04 9070 int func = BP_FUNC(bp);
da5a662a
VZ
9071 u32 swap_en;
9072 u32 swap_val;
34f80b04 9073
b4661739
EG
9074 /* clear the UNDI indication */
9075 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9076
34f80b04
EG
9077 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9078
9079 /* try unload UNDI on port 0 */
9080 bp->func = 0;
da5a662a
VZ
9081 bp->fw_seq =
9082 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9083 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 9084 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9085
9086 /* if UNDI is loaded on the other port */
9087 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9088
da5a662a
VZ
9089 /* send "DONE" for previous unload */
9090 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9091
9092 /* unload UNDI on port 1 */
34f80b04 9093 bp->func = 1;
da5a662a
VZ
9094 bp->fw_seq =
9095 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9096 DRV_MSG_SEQ_NUMBER_MASK);
9097 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9098
9099 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9100 }
9101
b4661739
EG
9102 /* now it's safe to release the lock */
9103 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9104
f1ef27ef 9105 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
9106
9107 /* close input traffic and wait for it */
9108 /* Do not rcv packets to BRB */
9109 REG_WR(bp,
9110 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9111 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9112 /* Do not direct rcv packets that are not for MCP to
9113 * the BRB */
9114 REG_WR(bp,
9115 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9116 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9117 /* clear AEU */
9118 REG_WR(bp,
9119 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9120 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9121 msleep(10);
9122
9123 /* save NIG port swap info */
9124 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9125 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
9126 /* reset device */
9127 REG_WR(bp,
9128 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 9129 0xd3ffffff);
34f80b04
EG
9130 REG_WR(bp,
9131 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9132 0x1403);
da5a662a
VZ
9133 /* take the NIG out of reset and restore swap values */
9134 REG_WR(bp,
9135 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9136 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9137 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9138 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9139
9140 /* send unload done to the MCP */
9141 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9142
9143 /* restore our func and fw_seq */
9144 bp->func = func;
9145 bp->fw_seq =
9146 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9147 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
9148
9149 } else
9150 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9151 }
9152}
9153
9154static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9155{
9156 u32 val, val2, val3, val4, id;
72ce58c3 9157 u16 pmc;
34f80b04
EG
9158
9159 /* Get the chip revision id and number. */
9160 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9161 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9162 id = ((val & 0xffff) << 16);
9163 val = REG_RD(bp, MISC_REG_CHIP_REV);
9164 id |= ((val & 0xf) << 12);
9165 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9166 id |= ((val & 0xff) << 4);
5a40e08e 9167 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
9168 id |= (val & 0xf);
9169 bp->common.chip_id = id;
9170 bp->link_params.chip_id = bp->common.chip_id;
9171 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9172
1c06328c
EG
9173 val = (REG_RD(bp, 0x2874) & 0x55);
9174 if ((bp->common.chip_id & 0x1) ||
9175 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9176 bp->flags |= ONE_PORT_FLAG;
9177 BNX2X_DEV_INFO("single port device\n");
9178 }
9179
34f80b04
EG
9180 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9181 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9182 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9183 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9184 bp->common.flash_size, bp->common.flash_size);
9185
9186 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 9187 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 9188 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
9189 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9190 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
9191
9192 if (!bp->common.shmem_base ||
9193 (bp->common.shmem_base < 0xA0000) ||
9194 (bp->common.shmem_base >= 0xC0000)) {
9195 BNX2X_DEV_INFO("MCP not active\n");
9196 bp->flags |= NO_MCP_FLAG;
9197 return;
9198 }
9199
9200 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9201 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9202 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 9203 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
9204
9205 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 9206 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
9207
9208 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9209 SHARED_HW_CFG_LED_MODE_MASK) >>
9210 SHARED_HW_CFG_LED_MODE_SHIFT);
9211
c2c8b03e
EG
9212 bp->link_params.feature_config_flags = 0;
9213 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9214 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9215 bp->link_params.feature_config_flags |=
9216 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9217 else
9218 bp->link_params.feature_config_flags &=
9219 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9220
34f80b04
EG
9221 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9222 bp->common.bc_ver = val;
9223 BNX2X_DEV_INFO("bc_ver %X\n", val);
9224 if (val < BNX2X_BC_VER) {
9225 /* for now only warn;
9226 * later we might need to enforce this */
cdaa7cb8
VZ
9227 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9228 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 9229 }
4d295db0
EG
9230 bp->link_params.feature_config_flags |=
9231 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9232 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
9233
9234 if (BP_E1HVN(bp) == 0) {
9235 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9236 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9237 } else {
9238 /* no WOL capability for E1HVN != 0 */
9239 bp->flags |= NO_WOL_FLAG;
9240 }
9241 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 9242 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
9243
9244 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9245 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9246 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9247 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9248
cdaa7cb8
VZ
9249 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9250 val, val2, val3, val4);
34f80b04
EG
9251}
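
The chip id built at the top of bnx2x_get_common_hwinfo() is four register fields packed into one word with the layout num:16-31, rev:12-15, metal:4-11, bond_id:0-3. The packing on its own:

#include <stdint.h>

static uint32_t make_chip_id(uint32_t num, uint32_t rev,
                             uint32_t metal, uint32_t bond)
{
        return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
               ((metal & 0xff) << 4) | (bond & 0xf);
}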
9252
9253static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9254 u32 switch_cfg)
a2fbb9ea 9255{
34f80b04 9256 int port = BP_PORT(bp);
a2fbb9ea
ET
9257 u32 ext_phy_type;
9258
a2fbb9ea
ET
9259 switch (switch_cfg) {
9260 case SWITCH_CFG_1G:
9261 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9262
c18487ee
YR
9263 ext_phy_type =
9264 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9265 switch (ext_phy_type) {
9266 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9267 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9268 ext_phy_type);
9269
34f80b04
EG
9270 bp->port.supported |= (SUPPORTED_10baseT_Half |
9271 SUPPORTED_10baseT_Full |
9272 SUPPORTED_100baseT_Half |
9273 SUPPORTED_100baseT_Full |
9274 SUPPORTED_1000baseT_Full |
9275 SUPPORTED_2500baseX_Full |
9276 SUPPORTED_TP |
9277 SUPPORTED_FIBRE |
9278 SUPPORTED_Autoneg |
9279 SUPPORTED_Pause |
9280 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9281 break;
9282
9283 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9284 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9285 ext_phy_type);
9286
34f80b04
EG
9287 bp->port.supported |= (SUPPORTED_10baseT_Half |
9288 SUPPORTED_10baseT_Full |
9289 SUPPORTED_100baseT_Half |
9290 SUPPORTED_100baseT_Full |
9291 SUPPORTED_1000baseT_Full |
9292 SUPPORTED_TP |
9293 SUPPORTED_FIBRE |
9294 SUPPORTED_Autoneg |
9295 SUPPORTED_Pause |
9296 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9297 break;
9298
9299 default:
9300 BNX2X_ERR("NVRAM config error. "
9301 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 9302 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9303 return;
9304 }
9305
34f80b04
EG
9306 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9307 port*0x10);
9308 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
9309 break;
9310
9311 case SWITCH_CFG_10G:
9312 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9313
c18487ee
YR
9314 ext_phy_type =
9315 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9316 switch (ext_phy_type) {
9317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9318 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9319 ext_phy_type);
9320
34f80b04
EG
9321 bp->port.supported |= (SUPPORTED_10baseT_Half |
9322 SUPPORTED_10baseT_Full |
9323 SUPPORTED_100baseT_Half |
9324 SUPPORTED_100baseT_Full |
9325 SUPPORTED_1000baseT_Full |
9326 SUPPORTED_2500baseX_Full |
9327 SUPPORTED_10000baseT_Full |
9328 SUPPORTED_TP |
9329 SUPPORTED_FIBRE |
9330 SUPPORTED_Autoneg |
9331 SUPPORTED_Pause |
9332 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9333 break;
9334
589abe3a
EG
9335 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9336 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 9337 ext_phy_type);
f1410647 9338
34f80b04 9339 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9340 SUPPORTED_1000baseT_Full |
34f80b04 9341 SUPPORTED_FIBRE |
589abe3a 9342 SUPPORTED_Autoneg |
34f80b04
EG
9343 SUPPORTED_Pause |
9344 SUPPORTED_Asym_Pause);
f1410647
ET
9345 break;
9346
589abe3a
EG
9347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9348 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
9349 ext_phy_type);
9350
34f80b04 9351 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9352 SUPPORTED_2500baseX_Full |
34f80b04 9353 SUPPORTED_1000baseT_Full |
589abe3a
EG
9354 SUPPORTED_FIBRE |
9355 SUPPORTED_Autoneg |
9356 SUPPORTED_Pause |
9357 SUPPORTED_Asym_Pause);
9358 break;
9359
9360 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9361 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9362 ext_phy_type);
9363
9364 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
9365 SUPPORTED_FIBRE |
9366 SUPPORTED_Pause |
9367 SUPPORTED_Asym_Pause);
f1410647
ET
9368 break;
9369
589abe3a
EG
9370 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9371 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
9372 ext_phy_type);
9373
34f80b04
EG
9374 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9375 SUPPORTED_1000baseT_Full |
9376 SUPPORTED_FIBRE |
34f80b04
EG
9377 SUPPORTED_Pause |
9378 SUPPORTED_Asym_Pause);
f1410647
ET
9379 break;
9380
589abe3a
EG
9381 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9382 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
9383 ext_phy_type);
9384
34f80b04 9385 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 9386 SUPPORTED_1000baseT_Full |
34f80b04 9387 SUPPORTED_Autoneg |
589abe3a 9388 SUPPORTED_FIBRE |
34f80b04
EG
9389 SUPPORTED_Pause |
9390 SUPPORTED_Asym_Pause);
c18487ee
YR
9391 break;
9392
4d295db0
EG
9393 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9394 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9395 ext_phy_type);
9396
9397 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9398 SUPPORTED_1000baseT_Full |
9399 SUPPORTED_Autoneg |
9400 SUPPORTED_FIBRE |
9401 SUPPORTED_Pause |
9402 SUPPORTED_Asym_Pause);
9403 break;
9404
f1410647
ET
9405 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9406 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9407 ext_phy_type);
9408
34f80b04
EG
9409 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9410 SUPPORTED_TP |
9411 SUPPORTED_Autoneg |
9412 SUPPORTED_Pause |
9413 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9414 break;
9415
28577185
EG
9416 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9417 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9418 ext_phy_type);
9419
9420 bp->port.supported |= (SUPPORTED_10baseT_Half |
9421 SUPPORTED_10baseT_Full |
9422 SUPPORTED_100baseT_Half |
9423 SUPPORTED_100baseT_Full |
9424 SUPPORTED_1000baseT_Full |
9425 SUPPORTED_10000baseT_Full |
9426 SUPPORTED_TP |
9427 SUPPORTED_Autoneg |
9428 SUPPORTED_Pause |
9429 SUPPORTED_Asym_Pause);
9430 break;
9431
c18487ee
YR
9432 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9433 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9434 bp->link_params.ext_phy_config);
9435 break;
9436
a2fbb9ea
ET
9437 default:
9438 BNX2X_ERR("NVRAM config error. "
9439 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 9440 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9441 return;
9442 }
9443
34f80b04
EG
9444 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9445 port*0x18);
9446 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 9447
a2fbb9ea
ET
9448 break;
9449
9450 default:
9451 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 9452 bp->port.link_config);
a2fbb9ea
ET
9453 return;
9454 }
34f80b04 9455 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
9456
9457 /* mask what we support according to speed_cap_mask */
c18487ee
YR
9458 if (!(bp->link_params.speed_cap_mask &
9459 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 9460 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 9461
c18487ee
YR
9462 if (!(bp->link_params.speed_cap_mask &
9463 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 9464 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 9465
c18487ee
YR
9466 if (!(bp->link_params.speed_cap_mask &
9467 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 9468 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 9469
c18487ee
YR
9470 if (!(bp->link_params.speed_cap_mask &
9471 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 9472 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 9473
c18487ee
YR
9474 if (!(bp->link_params.speed_cap_mask &
9475 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
9476 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9477 SUPPORTED_1000baseT_Full);
a2fbb9ea 9478
c18487ee
YR
9479 if (!(bp->link_params.speed_cap_mask &
9480 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 9481 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 9482
c18487ee
YR
9483 if (!(bp->link_params.speed_cap_mask &
9484 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 9485 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 9486
34f80b04 9487 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
9488}
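
The tail of bnx2x_link_settings_supported() is one repeated pattern: for every speed/duplex the NVRAM capability mask does not allow, clear the matching bit from the supported bitmap. A sketch of one such pair with placeholder bit values (not the real ethtool/NVRAM constants):

#include <stdint.h>

#define SUP_10_FULL     (1u << 0) /* stands in for SUPPORTED_10baseT_Full */
#define CAP_10M_FULL    (1u << 0) /* stands in for ..._D0_10M_FULL */

static uint32_t filter_supported(uint32_t supported, uint32_t cap_mask)
{
        if (!(cap_mask & CAP_10M_FULL))
                supported &= ~SUP_10_FULL;
        /* ...and so on for each speed/duplex pair, as above */
        return supported;
}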
9489
34f80b04 9490static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 9491{
c18487ee 9492 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 9493
34f80b04 9494 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 9495 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 9496 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 9497 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9498 bp->port.advertising = bp->port.supported;
a2fbb9ea 9499 } else {
c18487ee
YR
9500 u32 ext_phy_type =
9501 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9502
9503 if ((ext_phy_type ==
9504 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9505 (ext_phy_type ==
9506 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 9507 /* force 10G, no AN */
c18487ee 9508 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 9509 bp->port.advertising =
a2fbb9ea
ET
9510 (ADVERTISED_10000baseT_Full |
9511 ADVERTISED_FIBRE);
9512 break;
9513 }
9514 BNX2X_ERR("NVRAM config error. "
9515 "Invalid link_config 0x%x"
9516 " Autoneg not supported\n",
34f80b04 9517 bp->port.link_config);
a2fbb9ea
ET
9518 return;
9519 }
9520 break;
9521
9522 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 9523 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 9524 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
9525 bp->port.advertising = (ADVERTISED_10baseT_Full |
9526 ADVERTISED_TP);
a2fbb9ea 9527 } else {
cdaa7cb8
VZ
9528 BNX2X_ERROR("NVRAM config error. "
9529 "Invalid link_config 0x%x"
9530 " speed_cap_mask 0x%x\n",
9531 bp->port.link_config,
9532 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9533 return;
9534 }
9535 break;
9536
9537 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 9538 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
9539 bp->link_params.req_line_speed = SPEED_10;
9540 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9541 bp->port.advertising = (ADVERTISED_10baseT_Half |
9542 ADVERTISED_TP);
a2fbb9ea 9543 } else {
cdaa7cb8
VZ
9544 BNX2X_ERROR("NVRAM config error. "
9545 "Invalid link_config 0x%x"
9546 " speed_cap_mask 0x%x\n",
9547 bp->port.link_config,
9548 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9549 return;
9550 }
9551 break;
9552
9553 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 9554 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 9555 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
9556 bp->port.advertising = (ADVERTISED_100baseT_Full |
9557 ADVERTISED_TP);
a2fbb9ea 9558 } else {
cdaa7cb8
VZ
9559 BNX2X_ERROR("NVRAM config error. "
9560 "Invalid link_config 0x%x"
9561 " speed_cap_mask 0x%x\n",
9562 bp->port.link_config,
9563 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9564 return;
9565 }
9566 break;
9567
9568 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 9569 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
9570 bp->link_params.req_line_speed = SPEED_100;
9571 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9572 bp->port.advertising = (ADVERTISED_100baseT_Half |
9573 ADVERTISED_TP);
a2fbb9ea 9574 } else {
cdaa7cb8
VZ
9575 BNX2X_ERROR("NVRAM config error. "
9576 "Invalid link_config 0x%x"
9577 " speed_cap_mask 0x%x\n",
9578 bp->port.link_config,
9579 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9580 return;
9581 }
9582 break;
9583
9584 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 9585 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 9586 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
9587 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9588 ADVERTISED_TP);
a2fbb9ea 9589 } else {
cdaa7cb8
VZ
9590 BNX2X_ERROR("NVRAM config error. "
9591 "Invalid link_config 0x%x"
9592 " speed_cap_mask 0x%x\n",
9593 bp->port.link_config,
9594 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9595 return;
9596 }
9597 break;
9598
9599 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 9600 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 9601 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
9602 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9603 ADVERTISED_TP);
a2fbb9ea 9604 } else {
cdaa7cb8
VZ
9605 BNX2X_ERROR("NVRAM config error. "
9606 "Invalid link_config 0x%x"
9607 " speed_cap_mask 0x%x\n",
9608 bp->port.link_config,
9609 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9610 return;
9611 }
9612 break;
9613
9614 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9615 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9616 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 9617 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 9618 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
9619 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9620 ADVERTISED_FIBRE);
a2fbb9ea 9621 } else {
cdaa7cb8
VZ
9622 BNX2X_ERROR("NVRAM config error. "
9623 "Invalid link_config 0x%x"
9624 " speed_cap_mask 0x%x\n",
9625 bp->port.link_config,
9626 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9627 return;
9628 }
9629 break;
9630
9631 default:
cdaa7cb8
VZ
9632 BNX2X_ERROR("NVRAM config error. "
9633 "BAD link speed link_config 0x%x\n",
9634 bp->port.link_config);
c18487ee 9635 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9636 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
9637 break;
9638 }
a2fbb9ea 9639
34f80b04
EG
9640 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9641 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 9642 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 9643 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 9644 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9645
c18487ee 9646 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 9647 " advertising 0x%x\n",
c18487ee
YR
9648 bp->link_params.req_line_speed,
9649 bp->link_params.req_duplex,
34f80b04 9650 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
9651}
9652
e665bfda
MC
9653static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9654{
9655 mac_hi = cpu_to_be16(mac_hi);
9656 mac_lo = cpu_to_be32(mac_lo);
9657 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9658 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9659}
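
bnx2x_set_mac_buf() relies on cpu_to_be16()/cpu_to_be32() to lay the bytes out most-significant first regardless of host endianness; for example, mac_hi = 0x0010 and mac_lo = 0x18421ab0 produce 00:10:18:42:1a:b0. The same result written portably with shifts:

#include <stdint.h>

static void set_mac_buf(uint8_t *buf, uint32_t mac_lo, uint16_t mac_hi)
{
        buf[0] = mac_hi >> 8;
        buf[1] = mac_hi & 0xff;
        buf[2] = mac_lo >> 24;
        buf[3] = (mac_lo >> 16) & 0xff;
        buf[4] = (mac_lo >> 8) & 0xff;
        buf[5] = mac_lo & 0xff;
}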
9660
34f80b04 9661static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 9662{
34f80b04
EG
9663 int port = BP_PORT(bp);
9664 u32 val, val2;
589abe3a 9665 u32 config;
c2c8b03e 9666 u16 i;
01cd4528 9667 u32 ext_phy_type;
a2fbb9ea 9668
c18487ee 9669 bp->link_params.bp = bp;
34f80b04 9670 bp->link_params.port = port;
c18487ee 9671
c18487ee 9672 bp->link_params.lane_config =
a2fbb9ea 9673 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 9674 bp->link_params.ext_phy_config =
a2fbb9ea
ET
9675 SHMEM_RD(bp,
9676 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
9677 /* BCM8727_NOC => BCM8727 with no over-current */
9678 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9679 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9680 bp->link_params.ext_phy_config &=
9681 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9682 bp->link_params.ext_phy_config |=
9683 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9684 bp->link_params.feature_config_flags |=
9685 FEATURE_CONFIG_BCM8727_NOC;
9686 }
9687
c18487ee 9688 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
9689 SHMEM_RD(bp,
9690 dev_info.port_hw_config[port].speed_capability_mask);
9691
34f80b04 9692 bp->port.link_config =
a2fbb9ea
ET
9693 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9694
c2c8b03e
EG
9695 /* Get the 4 lanes xgxs config rx and tx */
9696 for (i = 0; i < 2; i++) {
9697 val = SHMEM_RD(bp,
9698 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9699 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9700 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9701
9702 val = SHMEM_RD(bp,
9703 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9704 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9705 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9706 }
9707
3ce2c3f9
EG
9708 /* If the device is capable of WoL, set the default state according
9709 * to the HW
9710 */
4d295db0 9711 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
9712 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9713 (config & PORT_FEATURE_WOL_ENABLED));
9714
c2c8b03e
EG
9715 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9716 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
9717 bp->link_params.lane_config,
9718 bp->link_params.ext_phy_config,
34f80b04 9719 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 9720
4d295db0
EG
9721 bp->link_params.switch_cfg |= (bp->port.link_config &
9722 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 9723 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
9724
9725 bnx2x_link_settings_requested(bp);
9726
01cd4528
EG
9727 /*
9728 * If connected directly, work with the internal PHY; otherwise, work
9729 * with the external PHY
9730 */
9731 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9732 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9733 bp->mdio.prtad = bp->link_params.phy_addr;
9734
9735 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9736 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9737 bp->mdio.prtad =
659bc5c4 9738 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 9739
a2fbb9ea
ET
9740 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9741 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 9742 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
9743 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9744 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
9745
9746#ifdef BCM_CNIC
9747 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9748 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9749 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9750#endif
34f80b04
EG
9751}
9752
9753static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9754{
9755 int func = BP_FUNC(bp);
9756 u32 val, val2;
9757 int rc = 0;
a2fbb9ea 9758
34f80b04 9759 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 9760
34f80b04
EG
9761 bp->e1hov = 0;
9762 bp->e1hmf = 0;
2145a920 9763 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
9764 bp->mf_config =
9765 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 9766
2691d51d 9767 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 9768 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 9769 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 9770 bp->e1hmf = 1;
2691d51d
EG
9771 BNX2X_DEV_INFO("%s function mode\n",
9772 IS_E1HMF(bp) ? "multi" : "single");
9773
9774 if (IS_E1HMF(bp)) {
9775 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9776 e1hov_tag) &
9777 FUNC_MF_CFG_E1HOV_TAG_MASK);
9778 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9779 bp->e1hov = val;
9780 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9781 "(0x%04x)\n",
9782 func, bp->e1hov, bp->e1hov);
9783 } else {
cdaa7cb8
VZ
9784 BNX2X_ERROR("No valid E1HOV for func %d,"
9785 " aborting\n", func);
34f80b04
EG
9786 rc = -EPERM;
9787 }
2691d51d
EG
9788 } else {
9789 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
9790 BNX2X_ERROR("VN %d in single function mode,"
9791 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
9792 rc = -EPERM;
9793 }
34f80b04
EG
9794 }
9795 }
a2fbb9ea 9796
34f80b04
EG
9797 if (!BP_NOMCP(bp)) {
9798 bnx2x_get_port_hwinfo(bp);
9799
9800 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9801 DRV_MSG_SEQ_NUMBER_MASK);
9802 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9803 }
9804
9805 if (IS_E1HMF(bp)) {
9806 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9807 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9808 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9809 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9810 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9811 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9812 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9813 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9814 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9815 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9816 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9817 ETH_ALEN);
9818 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9819 ETH_ALEN);
a2fbb9ea 9820 }
34f80b04
EG
9821
9822 return rc;
a2fbb9ea
ET
9823 }
9824
34f80b04
EG
9825 if (BP_NOMCP(bp)) {
9826 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 9827 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
9828 random_ether_addr(bp->dev->dev_addr);
9829 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9830 }
a2fbb9ea 9831
34f80b04
EG
9832 return rc;
9833}
9834
34f24c7f
VZ
9835static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9836{
9837 int cnt, i, block_end, rodi;
9838 char vpd_data[BNX2X_VPD_LEN+1];
9839 char str_id_reg[VENDOR_ID_LEN+1];
9840 char str_id_cap[VENDOR_ID_LEN+1];
9841 u8 len;
9842
9843 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9844 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9845
9846 if (cnt < BNX2X_VPD_LEN)
9847 goto out_not_found;
9848
9849 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9850 PCI_VPD_LRDT_RO_DATA);
9851 if (i < 0)
9852 goto out_not_found;
9853
9854
9855 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9856 pci_vpd_lrdt_size(&vpd_data[i]);
9857
9858 i += PCI_VPD_LRDT_TAG_SIZE;
9859
9860 if (block_end > BNX2X_VPD_LEN)
9861 goto out_not_found;
9862
9863 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9864 PCI_VPD_RO_KEYWORD_MFR_ID);
9865 if (rodi < 0)
9866 goto out_not_found;
9867
9868 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9869
9870 if (len != VENDOR_ID_LEN)
9871 goto out_not_found;
9872
9873 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9874
9875 /* vendor specific info */
9876 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9877 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9878 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9879 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9880
9881 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9882 PCI_VPD_RO_KEYWORD_VENDOR0);
9883 if (rodi >= 0) {
9884 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9885
9886 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9887
9888 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9889 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9890 bp->fw_ver[len] = ' ';
9891 }
9892 }
9893 return;
9894 }
9895out_not_found:
9896 return;
9897}
9898
34f80b04
EG
9899static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9900{
9901 int func = BP_FUNC(bp);
87942b46 9902 int timer_interval;
34f80b04
EG
9903 int rc;
9904
da5a662a
VZ
9905 /* Disable interrupt handling until HW is initialized */
9906 atomic_set(&bp->intr_sem, 1);
e1510706 9907 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 9908
34f80b04 9909 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 9910 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
9911#ifdef BCM_CNIC
9912 mutex_init(&bp->cnic_mutex);
9913#endif
a2fbb9ea 9914
1cf167f2 9915 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 9916 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
9917
9918 rc = bnx2x_get_hwinfo(bp);
9919
34f24c7f 9920 bnx2x_read_fwinfo(bp);
34f80b04
EG
9921 /* need to reset chip if undi was active */
9922 if (!BP_NOMCP(bp))
9923 bnx2x_undi_unload(bp);
9924
9925 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 9926 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
9927
9928 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
9929 dev_err(&bp->pdev->dev, "MCP disabled, "
9930 "must load devices in order!\n");
34f80b04 9931
555f6c78 9932 /* Set multi queue mode */
8badd27a
EG
9933 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9934 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
9935 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9936 "requested is not MSI-X\n");
555f6c78
EG
9937 multi_mode = ETH_RSS_MODE_DISABLED;
9938 }
9939 bp->multi_mode = multi_mode;
9940
9941
4fd89b7a
DK
9942 bp->dev->features |= NETIF_F_GRO;
9943
7a9b2557
VZ
9944 /* Set TPA flags */
9945 if (disable_tpa) {
9946 bp->flags &= ~TPA_ENABLE_FLAG;
9947 bp->dev->features &= ~NETIF_F_LRO;
9948 } else {
9949 bp->flags |= TPA_ENABLE_FLAG;
9950 bp->dev->features |= NETIF_F_LRO;
9951 }
9952
a18f5128
EG
9953 if (CHIP_IS_E1(bp))
9954 bp->dropless_fc = 0;
9955 else
9956 bp->dropless_fc = dropless_fc;
9957
8d5726c4 9958 bp->mrrs = mrrs;
7a9b2557 9959
34f80b04
EG
9960 bp->tx_ring_size = MAX_TX_AVAIL;
9961 bp->rx_ring_size = MAX_RX_AVAIL;
9962
9963 bp->rx_csum = 1;
34f80b04 9964
7d323bfd
EG
9965 /* make sure that the numbers are in the right granularity */
9966 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9967 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 9968
87942b46
EG
9969 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9970 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9971
9972 init_timer(&bp->timer);
9973 bp->timer.expires = jiffies + bp->current_interval;
9974 bp->timer.data = (unsigned long) bp;
9975 bp->timer.function = bnx2x_timer;
9976
9977 return rc;
a2fbb9ea
ET
9978}
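
The coalescing setup above rounds the default tick values down to the hardware granularity with an integer divide-then-multiply: (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR). In general form (if the granularity were, say, 8, then 50 would round to 48 and 25 to 24):

/* Round val down to the nearest multiple of gran (gran > 0). */
static unsigned int round_down_to(unsigned int val, unsigned int gran)
{
        return (val / gran) * gran;
}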
9979
9980/*
9981 * ethtool service functions
9982 */
9983
9984/* All ethtool functions called with rtnl_lock */
9985
9986static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9987{
9988 struct bnx2x *bp = netdev_priv(dev);
9989
34f80b04
EG
9990 cmd->supported = bp->port.supported;
9991 cmd->advertising = bp->port.advertising;
a2fbb9ea 9992
f34d28ea
EG
9993 if ((bp->state == BNX2X_STATE_OPEN) &&
9994 !(bp->flags & MF_FUNC_DIS) &&
9995 (bp->link_vars.link_up)) {
c18487ee
YR
9996 cmd->speed = bp->link_vars.line_speed;
9997 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9998 if (IS_E1HMF(bp)) {
9999 u16 vn_max_rate;
34f80b04 10000
b015e3d1
EG
10001 vn_max_rate =
10002 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 10003 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
10004 if (vn_max_rate < cmd->speed)
10005 cmd->speed = vn_max_rate;
10006 }
10007 } else {
10008 cmd->speed = -1;
10009 cmd->duplex = -1;
34f80b04 10010 }
a2fbb9ea 10011
c18487ee
YR
10012 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10013 u32 ext_phy_type =
10014 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
10015
10016 switch (ext_phy_type) {
10017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 10018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 10019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
10020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 10023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
10024 cmd->port = PORT_FIBRE;
10025 break;
10026
10027 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 10028 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
10029 cmd->port = PORT_TP;
10030 break;
10031
c18487ee
YR
10032 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10033 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10034 bp->link_params.ext_phy_config);
10035 break;
10036
f1410647
ET
10037 default:
10038 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
10039 bp->link_params.ext_phy_config);
10040 break;
f1410647
ET
10041 }
10042 } else
a2fbb9ea 10043 cmd->port = PORT_TP;
a2fbb9ea 10044
01cd4528 10045 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
10046 cmd->transceiver = XCVR_INTERNAL;
10047
c18487ee 10048 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 10049 cmd->autoneg = AUTONEG_ENABLE;
f1410647 10050 else
a2fbb9ea 10051 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
10052
10053 cmd->maxtxpkt = 0;
10054 cmd->maxrxpkt = 0;
10055
10056 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10057 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10058 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10059 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10060 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10061 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10062 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10063
10064 return 0;
10065}
10066
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
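/* Example (illustrative): a forced-speed request such as
 * "ethtool -s eth0 speed 10000 duplex full autoneg off" reaches this
 * handler with autoneg == AUTONEG_DISABLE; the switch above validates the
 * speed/duplex pair against bp->port.supported before bnx2x_link_set()
 * applies it to the link.
 */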

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
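/* Note: the dump produced above is a struct dump_hdr (hdr_size holds its
 * length in dwords, minus one) followed by the raw dword values of every
 * register marked online for the running chip (E1 or E1H).  Illustrative
 * retrieval from user space: "ethtool -d eth0 raw on > regs.bin".
 */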

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
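/* Note: MCPR_NVM_SW_ARB implements a per-port request/grant arbiter, so the
 * two ports of one chip never touch the shared flash concurrently; both the
 * acquire and the release path poll the grant bit for up to
 * NVRAM_TIMEOUT_COUNT*10 iterations of 5 us each before giving up.
 */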

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
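/* Note: a multi-dword NVRAM transaction is framed by MCPR_NVM_COMMAND_FIRST
 * on the first dword and MCPR_NVM_COMMAND_LAST on the final one, with no
 * framing flags in between; a single-dword access passes both flags in one
 * call, as bnx2x_nvram_write1() below does.
 */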

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
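/* Worked example: writing one byte at flash offset 0x1d reads the aligned
 * dword at 0x1c, clears byte lane BYTE_OFFSET(0x1d) = 8 (bits 15:8), merges
 * the new byte and writes the dword back - a read-modify-write, since the
 * NVRAM interface only transfers whole dwords.
 */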

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
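/* Note: besides framing the whole buffer, the loop above also closes and
 * reopens the transaction at every NVRAM_PAGE_SIZE boundary - LAST is set on
 * the dword that ends a page and FIRST on the dword that starts the next -
 * so a large write never crosses a flash page within a single command.
 */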

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
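/* Note: the eeprom->magic values are ASCII tags used by the external PHY
 * firmware upgrade flow on the SFX7101: 'PHYP' (0x50485950) prepares the
 * PHY, 'PHYR' (0x50485952) re-initializes the link afterwards and
 * 0x53985943 (commented as 'PHYC' above) marks completion; any other magic
 * falls through to a plain NVRAM write.
 */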

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
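/* Example (illustrative): "ethtool -C eth0 rx-usecs 25 tx-usecs 50" ends up
 * here; out-of-range values are silently clamped to BNX2X_MAX_COALESCE_TOUT
 * rather than rejected, and take effect immediately on a running interface.
 */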

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
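/* Example (illustrative): "ethtool -G eth0 rx 4080 tx 4080" resizes the
 * rings; the TX ring must exceed MAX_SKB_FRAGS + 4 entries so a maximally
 * fragmented skb always fits, and a running NIC is unloaded and reloaded to
 * apply the new sizes.
 */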

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
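/* Example (illustrative): "ethtool -A eth0 autoneg on rx on tx on" maps onto
 * this handler; when pause autoneg is requested and the line speed is also
 * auto-negotiated the request resolves to BNX2X_FLOW_CTRL_AUTO, otherwise
 * the RX/TX pause bits are forced exactly as given.
 */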

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
		return -EOPNOTSUPP;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (data & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
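/* Note: ETH_FLAG_LRO here enables the hardware TPA aggregation (the
 * driver's LRO equivalent), which requires RX checksum offload and a clear
 * "disable_tpa" module parameter; toggling it on a running interface costs
 * a full NIC reload, while ETH_FLAG_RXHASH only flips a feature bit.
 */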

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
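/* Note: the test frame built above is addressed to the NIC's own MAC with a
 * zeroed source address, 0x77 filler in the remaining header bytes and an
 * (i & 0xff) payload pattern; the RX path then verifies the CQE flags, the
 * length and every payload byte after the frame loops through MAC or PHY.
 */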

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (BP_NOMCP(bp))
		return rc;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	if (BP_NOMCP(bp))
		return 0;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
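/* Note: 0xdebb20e3 is the standard CRC-32 residual - running the
 * little-endian CRC over a block that already contains its appended CRC
 * yields this constant whenever the data is intact - so each NVRAM region is
 * validated without locating or recomputing its stored checksum explicitly.
 */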

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
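/* Example (illustrative): "ethtool -t eth0 offline" runs all of the tests
 * named in bnx2x_tests_str_arr[], reloading the NIC in diagnostic mode for
 * the first three; "ethtool -t eth0 online" (or any test on an E1H
 * multi-function device) skips the offline block entirely.
 */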

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%d]: tx_bcast_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
11745
a2fbb9ea
ET
11746static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11747{
bb2a0f7a 11748 struct bnx2x *bp = netdev_priv(dev);
de832a55 11749 int i, j, k;
bb2a0f7a 11750
a2fbb9ea
ET
11751 switch (stringset) {
11752 case ETH_SS_STATS:
de832a55
EG
11753 if (is_multi(bp)) {
11754 k = 0;
54b9ddaa 11755 for_each_queue(bp, i) {
de832a55
EG
11756 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11757 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11758 bnx2x_q_stats_arr[j].string, i);
11759 k += BNX2X_NUM_Q_STATS;
11760 }
11761 if (IS_E1HMF_MODE_STAT(bp))
11762 break;
11763 for (j = 0; j < BNX2X_NUM_STATS; j++)
11764 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11765 bnx2x_stats_arr[j].string);
11766 } else {
11767 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11768 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11769 continue;
11770 strcpy(buf + j*ETH_GSTRING_LEN,
11771 bnx2x_stats_arr[i].string);
11772 j++;
11773 }
bb2a0f7a 11774 }
a2fbb9ea
ET
11775 break;
11776
11777 case ETH_SS_TEST:
11778 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11779 break;
11780 }
11781}
11782
a2fbb9ea
ET
11783static void bnx2x_get_ethtool_stats(struct net_device *dev,
11784 struct ethtool_stats *stats, u64 *buf)
11785{
11786 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
11787 u32 *hw_stats, *offset;
11788 int i, j, k;
bb2a0f7a 11789
de832a55
EG
11790 if (is_multi(bp)) {
11791 k = 0;
54b9ddaa 11792 for_each_queue(bp, i) {
de832a55
EG
11793 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11794 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11795 if (bnx2x_q_stats_arr[j].size == 0) {
11796 /* skip this counter */
11797 buf[k + j] = 0;
11798 continue;
11799 }
11800 offset = (hw_stats +
11801 bnx2x_q_stats_arr[j].offset);
11802 if (bnx2x_q_stats_arr[j].size == 4) {
11803 /* 4-byte counter */
11804 buf[k + j] = (u64) *offset;
11805 continue;
11806 }
11807 /* 8-byte counter */
11808 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11809 }
11810 k += BNX2X_NUM_Q_STATS;
11811 }
11812 if (IS_E1HMF_MODE_STAT(bp))
11813 return;
11814 hw_stats = (u32 *)&bp->eth_stats;
11815 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11816 if (bnx2x_stats_arr[j].size == 0) {
11817 /* skip this counter */
11818 buf[k + j] = 0;
11819 continue;
11820 }
11821 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11822 if (bnx2x_stats_arr[j].size == 4) {
11823 /* 4-byte counter */
11824 buf[k + j] = (u64) *offset;
11825 continue;
11826 }
11827 /* 8-byte counter */
11828 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 11829 }
de832a55
EG
11830 } else {
11831 hw_stats = (u32 *)&bp->eth_stats;
11832 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11833 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11834 continue;
11835 if (bnx2x_stats_arr[i].size == 0) {
11836 /* skip this counter */
11837 buf[j] = 0;
11838 j++;
11839 continue;
11840 }
11841 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11842 if (bnx2x_stats_arr[i].size == 4) {
11843 /* 4-byte counter */
11844 buf[j] = (u64) *offset;
11845 j++;
11846 continue;
11847 }
11848 /* 8-byte counter */
11849 buf[j] = HILO_U64(*offset, *(offset + 1));
11850 j++;
11851 }
11852 }
11853}
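/* The 8-byte counters above are stored as two consecutive 32-bit words,
 * high word first, so HILO_U64(hi, lo) rebuilds ((u64)hi << 32) + lo;
 * e.g. hi = 0x1, lo = 0x2 yields 0x100000002.
 */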
11854
11855static int bnx2x_phys_id(struct net_device *dev, u32 data)
11856{
11857 struct bnx2x *bp = netdev_priv(dev);
11858 int i;
11859
11860 if (!netif_running(dev))
11861 return 0;
11862
11863 if (!bp->port.pmf)
11864 return 0;
11865
11866 if (data == 0)
11867 data = 2;
11868
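/* 'data' is the requested blink time in seconds (ethtool -p); each
 * iteration below lasts about 500 ms and toggles the LED, so data * 2
 * iterations give roughly one on/off cycle per second.
 */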
11869 for (i = 0; i < (data * 2); i++) {
11870 if ((i % 2) == 0)
11871 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11872 SPEED_1000);
11873 else
11874 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11875
11876 msleep_interruptible(500);
11877 if (signal_pending(current))
11878 break;
11879 }
11880
11881 if (bp->link_vars.link_up)
11882 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11883 bp->link_vars.line_speed);
11884
11885 return 0;
11886}
11887
11888static const struct ethtool_ops bnx2x_ethtool_ops = {
11889 .get_settings = bnx2x_get_settings,
11890 .set_settings = bnx2x_set_settings,
11891 .get_drvinfo = bnx2x_get_drvinfo,
11892 .get_regs_len = bnx2x_get_regs_len,
11893 .get_regs = bnx2x_get_regs,
11894 .get_wol = bnx2x_get_wol,
11895 .set_wol = bnx2x_set_wol,
11896 .get_msglevel = bnx2x_get_msglevel,
11897 .set_msglevel = bnx2x_set_msglevel,
11898 .nway_reset = bnx2x_nway_reset,
11899 .get_link = bnx2x_get_link,
11900 .get_eeprom_len = bnx2x_get_eeprom_len,
11901 .get_eeprom = bnx2x_get_eeprom,
11902 .set_eeprom = bnx2x_set_eeprom,
11903 .get_coalesce = bnx2x_get_coalesce,
11904 .set_coalesce = bnx2x_set_coalesce,
11905 .get_ringparam = bnx2x_get_ringparam,
11906 .set_ringparam = bnx2x_set_ringparam,
11907 .get_pauseparam = bnx2x_get_pauseparam,
11908 .set_pauseparam = bnx2x_set_pauseparam,
11909 .get_rx_csum = bnx2x_get_rx_csum,
11910 .set_rx_csum = bnx2x_set_rx_csum,
11911 .get_tx_csum = ethtool_op_get_tx_csum,
11912 .set_tx_csum = ethtool_op_set_tx_hw_csum,
11913 .set_flags = bnx2x_set_flags,
11914 .get_flags = ethtool_op_get_flags,
11915 .get_sg = ethtool_op_get_sg,
11916 .set_sg = ethtool_op_set_sg,
11917 .get_tso = ethtool_op_get_tso,
11918 .set_tso = bnx2x_set_tso,
11919 .self_test = bnx2x_self_test,
11920 .get_sset_count = bnx2x_get_sset_count,
11921 .get_strings = bnx2x_get_strings,
11922 .phys_id = bnx2x_phys_id,
11923 .get_ethtool_stats = bnx2x_get_ethtool_stats,
11924};
11925
11926/* end of ethtool_ops */
11927
11928/****************************************************************************
11929* General service functions
11930****************************************************************************/
11931
11932static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11933{
11934 u16 pmcsr;
11935
11936 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11937
11938 switch (state) {
11939 case PCI_D0:
11940 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11941 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11942 PCI_PM_CTRL_PME_STATUS));
11943
11944 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11945 /* delay required during transition out of D3hot */
11946 msleep(20);
11947 break;
11948
11949 case PCI_D3hot:
11950 /* If there are other clients above, don't
11951 shut down the power */
11952 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11953 return 0;
11954 /* Don't shut down the power for emulation and FPGA */
11955 if (CHIP_REV_IS_SLOW(bp))
11956 return 0;
11957
11958 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11959 pmcsr |= 3;
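/* the two low bits of PMCSR select the power state; writing 3
 * requests D3hot (0 would select D0), per the PCI PM spec */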
11960
11961 if (bp->wol)
11962 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11963
11964 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11965 pmcsr);
11966
11967 /* No more memory access after this point until
11968 * device is brought back to D0.
11969 */
11970 break;
11971
11972 default:
11973 return -EINVAL;
11974 }
11975 return 0;
11976}
11977
11978static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11979{
11980 u16 rx_cons_sb;
11981
11982 /* Tell compiler that status block fields can change */
11983 barrier();
11984 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11985 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11986 rx_cons_sb++;
11987 return (fp->rx_comp_cons != rx_cons_sb);
11988}
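/* The increment above skips the last slot of a completion queue page,
 * which holds the "next page" descriptor rather than a completion and
 * therefore never matches a real consumer index.
 */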
11989
11990/*
11991 * net_device service functions
11992 */
11993
11994static int bnx2x_poll(struct napi_struct *napi, int budget)
11995{
11996 int work_done = 0;
11997 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11998 napi);
11999 struct bnx2x *bp = fp->bp;
12000
12001 while (1) {
12002#ifdef BNX2X_STOP_ON_ERROR
12003 if (unlikely(bp->panic)) {
12004 napi_complete(napi);
12005 return 0;
12006 }
12007#endif
12008
12009 if (bnx2x_has_tx_work(fp))
12010 bnx2x_tx_int(fp);
12011
12012 if (bnx2x_has_rx_work(fp)) {
12013 work_done += bnx2x_rx_int(fp, budget - work_done);
12014
12015 /* must not complete if we consumed full budget */
12016 if (work_done >= budget)
12017 break;
12018 }
12019
12020 /* Fall out from the NAPI loop if needed */
12021 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12022 bnx2x_update_fpsb_idx(fp);
12023 /* bnx2x_has_rx_work() reads the status block, thus we need
12024 * to ensure that status block indices have been actually read
12025 * (bnx2x_update_fpsb_idx) prior to this check
12026 * (bnx2x_has_rx_work) so that we won't write the "newer"
12027 * value of the status block to IGU (if there was a DMA right
12028 * after bnx2x_has_rx_work and if there is no rmb, the memory
12029 * reading (bnx2x_update_fpsb_idx) may be postponed to right
12030 * before bnx2x_ack_sb). In this case there will never be
12031 * another interrupt until there is another update of the
12032 * status block, while there is still unhandled work.
12033 */
12034 rmb();
12035
12036 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12037 napi_complete(napi);
12038 /* Re-enable interrupts */
12039 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12040 le16_to_cpu(fp->fp_c_idx),
12041 IGU_INT_NOP, 1);
12042 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12043 le16_to_cpu(fp->fp_u_idx),
12044 IGU_INT_ENABLE, 1);
12045 break;
12046 }
12047 }
12048 }
12049
12050 return work_done;
12051}
12052
12053
12054/* we split the first BD into headers and data BDs
12055 * to ease the pain of our fellow microcode engineers
12056 * we use one mapping for both BDs
12057 * So far this has only been observed to happen
12058 * in Other Operating Systems(TM)
12059 */
12060static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12061 struct bnx2x_fastpath *fp,
12062 struct sw_tx_bd *tx_buf,
12063 struct eth_tx_start_bd **tx_bd, u16 hlen,
12064 u16 bd_prod, int nbd)
12065{
12066 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12067 struct eth_tx_bd *d_tx_bd;
12068 dma_addr_t mapping;
12069 int old_len = le16_to_cpu(h_tx_bd->nbytes);
12070
12071 /* first fix first BD */
12072 h_tx_bd->nbd = cpu_to_le16(nbd);
12073 h_tx_bd->nbytes = cpu_to_le16(hlen);
12074
12075 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12076 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12077 h_tx_bd->addr_lo, h_tx_bd->nbd);
12078
12079 /* now get a new data BD
12080 * (after the pbd) and fill it */
12081 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12082 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12083
12084 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12085 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12086
12087 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12088 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12089 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12090
12091 /* this marks the BD as one that has no individual mapping */
12092 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12093
12094 DP(NETIF_MSG_TX_QUEUED,
12095 "TSO split data size is %d (%x:%x)\n",
12096 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12097
12098 /* update tx_bd */
12099 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12100
12101 return bd_prod;
12102}
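/* Example of the split above: a start BD holding old_len = 1514 bytes
 * with hlen = 66 bytes of headers becomes a 66-byte header BD plus a
 * 1448-byte data BD that reuses the same DMA mapping at offset hlen.
 */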
12103
12104static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12105{
12106 if (fix > 0)
12107 csum = (u16) ~csum_fold(csum_sub(csum,
12108 csum_partial(t_header - fix, fix, 0)));
12109
12110 else if (fix < 0)
12111 csum = (u16) ~csum_fold(csum_add(csum,
12112 csum_partial(t_header, -fix, 0)));
12113
12114 return swab16(csum);
12115}
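/* Sketch of the fixup: with fix > 0 the stack checksummed 'fix' extra
 * bytes before the transport header, so their partial sum is subtracted;
 * with fix < 0 the missing bytes are added back. The result is folded,
 * inverted and byte-swapped into the format the parsing BD expects.
 */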
12116
12117static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12118{
12119 u32 rc;
12120
12121 if (skb->ip_summed != CHECKSUM_PARTIAL)
12122 rc = XMIT_PLAIN;
12123
12124 else {
12125 if (skb->protocol == htons(ETH_P_IPV6)) {
12126 rc = XMIT_CSUM_V6;
12127 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12128 rc |= XMIT_CSUM_TCP;
12129
12130 } else {
12131 rc = XMIT_CSUM_V4;
12132 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12133 rc |= XMIT_CSUM_TCP;
12134 }
12135 }
12136
12137 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12138 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12139
12140 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12141 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12142
12143 return rc;
12144}
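/* For example, a CHECKSUM_PARTIAL TCP/IPv4 skb with SKB_GSO_TCPV4 set
 * yields (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4), while a packet
 * with no checksum offload maps to XMIT_PLAIN.
 */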
12145
12146#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12147/* check if packet requires linearization (packet is too fragmented)
12148 no need to check fragmentation if page size > 8K (there will be no
12149 violation of FW restrictions) */
12150static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12151 u32 xmit_type)
12152{
12153 int to_copy = 0;
12154 int hlen = 0;
12155 int first_bd_sz = 0;
12156
12157 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12158 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12159
12160 if (xmit_type & XMIT_GSO) {
12161 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12162 /* Check if LSO packet needs to be copied:
12163 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12164 int wnd_size = MAX_FETCH_BD - 3;
12165 /* Number of windows to check */
12166 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12167 int wnd_idx = 0;
12168 int frag_idx = 0;
12169 u32 wnd_sum = 0;
12170
12171 /* Headers length */
12172 hlen = (int)(skb_transport_header(skb) - skb->data) +
12173 tcp_hdrlen(skb);
12174
12175 /* Amount of data (w/o headers) on linear part of SKB */
12176 first_bd_sz = skb_headlen(skb) - hlen;
12177
12178 wnd_sum = first_bd_sz;
12179
12180 /* Calculate the first sum - it's special */
12181 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12182 wnd_sum +=
12183 skb_shinfo(skb)->frags[frag_idx].size;
12184
12185 /* If there was data on linear skb data - check it */
12186 if (first_bd_sz > 0) {
12187 if (unlikely(wnd_sum < lso_mss)) {
12188 to_copy = 1;
12189 goto exit_lbl;
12190 }
12191
12192 wnd_sum -= first_bd_sz;
12193 }
12194
12195 /* Others are easier: run through the frag list and
12196 check all windows */
12197 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12198 wnd_sum +=
12199 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12200
12201 if (unlikely(wnd_sum < lso_mss)) {
12202 to_copy = 1;
12203 break;
12204 }
12205 wnd_sum -=
12206 skb_shinfo(skb)->frags[wnd_idx].size;
12207 }
12208 } else {
12209 /* in the non-LSO case a too fragmented packet should always
12210 be linearized */
12211 to_copy = 1;
12212 }
12213 }
12214
12215exit_lbl:
12216 if (unlikely(to_copy))
12217 DP(NETIF_MSG_TX_QUEUED,
12218 "Linearization IS REQUIRED for %s packet. "
12219 "num_frags %d hlen %d first_bd_sz %d\n",
12220 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12221 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12222
12223 return to_copy;
12224}
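/* Worked example (hypothetical numbers): with wnd_size = 10 BDs and
 * lso_mss = 1400, every run of 10 consecutive BDs (the linear part
 * counts for the first window) must cover at least 1400 bytes; a
 * window of ten 100-byte frags sums to 1000 < 1400, so such a packet
 * would be linearized.
 */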
12225#endif
12226
12227/* called with netif_tx_lock
12228 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12229 * netif_wake_queue()
12230 */
12231static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12232{
12233 struct bnx2x *bp = netdev_priv(dev);
12234 struct bnx2x_fastpath *fp;
12235 struct netdev_queue *txq;
12236 struct sw_tx_bd *tx_buf;
12237 struct eth_tx_start_bd *tx_start_bd;
12238 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12239 struct eth_tx_parse_bd *pbd = NULL;
12240 u16 pkt_prod, bd_prod;
12241 int nbd, fp_index;
12242 dma_addr_t mapping;
12243 u32 xmit_type = bnx2x_xmit_type(bp, skb);
12244 int i;
12245 u8 hlen = 0;
12246 __le16 pkt_size = 0;
12247 struct ethhdr *eth;
12248 u8 mac_type = UNICAST_ADDRESS;
12249
12250#ifdef BNX2X_STOP_ON_ERROR
12251 if (unlikely(bp->panic))
12252 return NETDEV_TX_BUSY;
12253#endif
12254
12255 fp_index = skb_get_queue_mapping(skb);
12256 txq = netdev_get_tx_queue(dev, fp_index);
12257
12258 fp = &bp->fp[fp_index];
12259
12260 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12261 fp->eth_q_stats.driver_xoff++;
12262 netif_tx_stop_queue(txq);
12263 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12264 return NETDEV_TX_BUSY;
12265 }
12266
12267 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12268 " gso type %x xmit_type %x\n",
12269 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12270 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12271
12272 eth = (struct ethhdr *)skb->data;
12273
12274 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12275 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12276 if (is_broadcast_ether_addr(eth->h_dest))
12277 mac_type = BROADCAST_ADDRESS;
12278 else
12279 mac_type = MULTICAST_ADDRESS;
12280 }
12281
12282#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12283 /* First, check if we need to linearize the skb (due to FW
12284 restrictions). No need to check fragmentation if page size > 8K
12285 (there will be no violation of FW restrictions) */
12286 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12287 /* Statistics of linearization */
12288 bp->lin_cnt++;
12289 if (skb_linearize(skb) != 0) {
12290 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12291 "silently dropping this SKB\n");
12292 dev_kfree_skb_any(skb);
12293 return NETDEV_TX_OK;
12294 }
12295 }
12296#endif
12297
12298 /*
12299 Please read carefully. First we use one BD which we mark as start,
12300 then we have a parsing info BD (used for TSO or xsum),
12301 and only then we have the rest of the TSO BDs.
12302 (don't forget to mark the last one as last,
12303 and to unmap only AFTER you write to the BD ...)
12304 And above all, all pbd sizes are in words - NOT DWORDS!
12305 */
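/* Resulting BD chain for one packet (sketch):
 *
 * start BD (headers) -> parse BD (pbd) -> data BD ... last frag BD
 *
 * nbd counts all of them; the parse BD carries offload info only and
 * maps no data.
 */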
12306
12307 pkt_prod = fp->tx_pkt_prod++;
12308 bd_prod = TX_BD(fp->tx_bd_prod);
12309
12310 /* get a tx_buf and first BD */
12311 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12312 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12313
12314 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12315 tx_start_bd->general_data = (mac_type <<
12316 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12317 /* header nbd */
12318 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12319
12320 /* remember the first BD of the packet */
12321 tx_buf->first_bd = fp->tx_bd_prod;
12322 tx_buf->skb = skb;
12323 tx_buf->flags = 0;
12324
12325 DP(NETIF_MSG_TX_QUEUED,
12326 "sending pkt %u @%p next_idx %u bd %u @%p\n",
12327 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12328
12329#ifdef BCM_VLAN
12330 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12331 (bp->flags & HW_VLAN_TX_FLAG)) {
12332 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12333 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12334 } else
12335#endif
12336 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12337
12338 /* turn on parsing and get a BD */
12339 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12340 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12341
12342 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12343
12344 if (xmit_type & XMIT_CSUM) {
12345 hlen = (skb_network_header(skb) - skb->data) / 2;
12346
12347 /* for now NS flag is not used in Linux */
12348 pbd->global_data =
12349 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12350 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12351
12352 pbd->ip_hlen = (skb_transport_header(skb) -
12353 skb_network_header(skb)) / 2;
12354
12355 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12356
12357 pbd->total_hlen = cpu_to_le16(hlen);
12358 hlen = hlen*2;
12359
12360 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12361
12362 if (xmit_type & XMIT_CSUM_V4)
12363 tx_start_bd->bd_flags.as_bitfield |=
12364 ETH_TX_BD_FLAGS_IP_CSUM;
12365 else
12366 tx_start_bd->bd_flags.as_bitfield |=
12367 ETH_TX_BD_FLAGS_IPV6;
12368
12369 if (xmit_type & XMIT_CSUM_TCP) {
12370 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12371
12372 } else {
12373 s8 fix = SKB_CS_OFF(skb); /* signed! */
12374
12375 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12376
12377 DP(NETIF_MSG_TX_QUEUED,
12378 "hlen %d fix %d csum before fix %x\n",
12379 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12380
12381 /* HW bug: fixup the CSUM */
12382 pbd->tcp_pseudo_csum =
12383 bnx2x_csum_fix(skb_transport_header(skb),
12384 SKB_CS(skb), fix);
12385
12386 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12387 pbd->tcp_pseudo_csum);
12388 }
12389 }
12390
12391 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12392 skb_headlen(skb), DMA_TO_DEVICE);
12393
12394 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12395 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12396 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12397 tx_start_bd->nbd = cpu_to_le16(nbd);
12398 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12399 pkt_size = tx_start_bd->nbytes;
12400
12401 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
12402 " nbytes %d flags %x vlan %x\n",
12403 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12404 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12405 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12406
12407 if (xmit_type & XMIT_GSO) {
12408
12409 DP(NETIF_MSG_TX_QUEUED,
12410 "TSO packet len %d hlen %d total len %d tso size %d\n",
12411 skb->len, hlen, skb_headlen(skb),
12412 skb_shinfo(skb)->gso_size);
12413
12414 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12415
12416 if (unlikely(skb_headlen(skb) > hlen))
12417 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12418 hlen, bd_prod, ++nbd);
12419
12420 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12421 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12422 pbd->tcp_flags = pbd_tcp_flags(skb);
12423
12424 if (xmit_type & XMIT_GSO_V4) {
12425 pbd->ip_id = swab16(ip_hdr(skb)->id);
12426 pbd->tcp_pseudo_csum =
12427 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12428 ip_hdr(skb)->daddr,
12429 0, IPPROTO_TCP, 0));
12430
12431 } else
12432 pbd->tcp_pseudo_csum =
12433 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12434 &ipv6_hdr(skb)->daddr,
12435 0, IPPROTO_TCP, 0));
12436
12437 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12438 }
12439 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12440
12441 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12442 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12443
12444 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12445 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12446 if (total_pkt_bd == NULL)
12447 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12448
12449 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12450 frag->page_offset,
12451 frag->size, DMA_TO_DEVICE);
12452
12453 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12454 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12455 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12456 le16_add_cpu(&pkt_size, frag->size);
12457
12458 DP(NETIF_MSG_TX_QUEUED,
12459 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12460 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12461 le16_to_cpu(tx_data_bd->nbytes));
12462 }
12463
12464 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12465
12466 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12467
12468 /* now send a tx doorbell, counting the next BD
12469 * if the packet contains or ends with it
12470 */
12471 if (TX_BD_POFF(bd_prod) < nbd)
12472 nbd++;
12473
12474 if (total_pkt_bd != NULL)
12475 total_pkt_bd->total_pkt_bytes = pkt_size;
12476
12477 if (pbd)
12478 DP(NETIF_MSG_TX_QUEUED,
12479 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12480 " tcp_flags %x xsum %x seq %u hlen %u\n",
12481 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12482 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12483 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12484
12485 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
12486
12487 /*
12488 * Make sure that the BD data is updated before updating the producer
12489 * since FW might read the BD right after the producer is updated.
12490 * This is only applicable for weak-ordered memory model archs such
12491 * as IA-64. The following barrier is also mandatory since FW will
12492 * assume packets must have BDs.
12493 */
12494 wmb();
12495
12496 fp->tx_db.data.prod += nbd;
12497 barrier();
12498 DOORBELL(bp, fp->index, fp->tx_db.raw);
12499
12500 mmiowb();
12501
12502 fp->tx_bd_prod += nbd;
12503
12504 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12505 netif_tx_stop_queue(txq);
12506
12507 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12508 * ordering of set_bit() in netif_tx_stop_queue() and read of
12509 * fp->tx_bd_cons */
12510 smp_mb();
12511
12512 fp->eth_q_stats.driver_xoff++;
12513 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12514 netif_tx_wake_queue(txq);
12515 }
12516 fp->tx_pkt++;
12517
12518 return NETDEV_TX_OK;
12519}
12520
12521/* called with rtnl_lock */
12522static int bnx2x_open(struct net_device *dev)
12523{
12524 struct bnx2x *bp = netdev_priv(dev);
12525
12526 netif_carrier_off(dev);
12527
12528 bnx2x_set_power_state(bp, PCI_D0);
12529
12530 if (!bnx2x_reset_is_done(bp)) {
12531 do {
12532 /* Reset MCP mailbox sequence if there is an ongoing
12533 * recovery
12534 */
12535 bp->fw_seq = 0;
12536
12537 /* If it's the first function to load and reset done
12538 * is still not cleared, a recovery may be in progress. We don't
12539 * check the attention state here because it may have
12540 * already been cleared by a "common" reset, but we
12541 * shall proceed with "process kill" anyway.
12542 */
12543 if ((bnx2x_get_load_cnt(bp) == 0) &&
12544 bnx2x_trylock_hw_lock(bp,
12545 HW_LOCK_RESOURCE_RESERVED_08) &&
12546 (!bnx2x_leader_reset(bp))) {
12547 DP(NETIF_MSG_HW, "Recovered in open\n");
12548 break;
12549 }
12550
12551 bnx2x_set_power_state(bp, PCI_D3hot);
12552
12553 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
12554 " completed yet. Try again later. If you still see this"
12555 " message after a few retries then power cycle is"
12556 " required.\n", bp->dev->name);
12557
12558 return -EAGAIN;
12559 } while (0);
12560 }
12561
12562 bp->recovery_state = BNX2X_RECOVERY_DONE;
12563
12564 return bnx2x_nic_load(bp, LOAD_OPEN);
12565}
12566
12567/* called with rtnl_lock */
12568static int bnx2x_close(struct net_device *dev)
12569{
12570 struct bnx2x *bp = netdev_priv(dev);
12571
12572 /* Unload the driver, release IRQs */
12573 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12574 bnx2x_set_power_state(bp, PCI_D3hot);
12575
12576 return 0;
12577}
12578
12579/* called with netif_tx_lock from dev_mcast.c */
12580static void bnx2x_set_rx_mode(struct net_device *dev)
12581{
12582 struct bnx2x *bp = netdev_priv(dev);
12583 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12584 int port = BP_PORT(bp);
12585
12586 if (bp->state != BNX2X_STATE_OPEN) {
12587 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12588 return;
12589 }
12590
12591 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12592
12593 if (dev->flags & IFF_PROMISC)
12594 rx_mode = BNX2X_RX_MODE_PROMISC;
12595
12596 else if ((dev->flags & IFF_ALLMULTI) ||
12597 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12598 CHIP_IS_E1(bp)))
12599 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12600
12601 else { /* some multicasts */
12602 if (CHIP_IS_E1(bp)) {
12603 int i, old, offset;
12604 struct netdev_hw_addr *ha;
12605 struct mac_configuration_cmd *config =
12606 bnx2x_sp(bp, mcast_config);
12607
12608 i = 0;
12609 netdev_for_each_mc_addr(ha, dev) {
12610 config->config_table[i].
12611 cam_entry.msb_mac_addr =
12612 swab16(*(u16 *)&ha->addr[0]);
12613 config->config_table[i].
12614 cam_entry.middle_mac_addr =
12615 swab16(*(u16 *)&ha->addr[2]);
12616 config->config_table[i].
12617 cam_entry.lsb_mac_addr =
12618 swab16(*(u16 *)&ha->addr[4]);
12619 config->config_table[i].cam_entry.flags =
12620 cpu_to_le16(port);
12621 config->config_table[i].
12622 target_table_entry.flags = 0;
12623 config->config_table[i].target_table_entry.
12624 clients_bit_vector =
12625 cpu_to_le32(1 << BP_L_ID(bp));
12626 config->config_table[i].
12627 target_table_entry.vlan_id = 0;
12628
12629 DP(NETIF_MSG_IFUP,
12630 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12631 config->config_table[i].
12632 cam_entry.msb_mac_addr,
12633 config->config_table[i].
12634 cam_entry.middle_mac_addr,
12635 config->config_table[i].
12636 cam_entry.lsb_mac_addr);
12637 i++;
12638 }
12639 old = config->hdr.length;
12640 if (old > i) {
12641 for (; i < old; i++) {
12642 if (CAM_IS_INVALID(config->
12643 config_table[i])) {
12644 /* already invalidated */
12645 break;
12646 }
12647 /* invalidate */
12648 CAM_INVALIDATE(config->
12649 config_table[i]);
12650 }
12651 }
12652
12653 if (CHIP_REV_IS_SLOW(bp))
12654 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12655 else
12656 offset = BNX2X_MAX_MULTICAST*(1 + port);
12657
12658 config->hdr.length = i;
12659 config->hdr.offset = offset;
12660 config->hdr.client_id = bp->fp->cl_id;
12661 config->hdr.reserved1 = 0;
12662
12663 bp->set_mac_pending++;
12664 smp_wmb();
12665
12666 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12667 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12668 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12669 0);
12670 } else { /* E1H */
12671 /* Accept one or more multicasts */
12672 struct netdev_hw_addr *ha;
12673 u32 mc_filter[MC_HASH_SIZE];
12674 u32 crc, bit, regidx;
12675 int i;
12676
12677 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12678
12679 netdev_for_each_mc_addr(ha, dev) {
12680 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12681 ha->addr);
12682
12683 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12684 bit = (crc >> 24) & 0xff;
12685 regidx = bit >> 5;
12686 bit &= 0x1f;
12687 mc_filter[regidx] |= (1 << bit);
12688 }
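/* Sketch of the mapping: the top byte of the crc picks one of 256
 * filter bits, e.g. crc = 0xa7000000 gives bit 0xa7, so regidx = 5
 * and bit 7 of mc_filter[5] is set.
 */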
12689
12690 for (i = 0; i < MC_HASH_SIZE; i++)
12691 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12692 mc_filter[i]);
12693 }
12694 }
12695
12696 bp->rx_mode = rx_mode;
12697 bnx2x_set_storm_rx_mode(bp);
12698}
12699
12700/* called with rtnl_lock */
12701static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12702{
12703 struct sockaddr *addr = p;
12704 struct bnx2x *bp = netdev_priv(dev);
12705
12706 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12707 return -EINVAL;
12708
12709 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12710 if (netif_running(dev)) {
12711 if (CHIP_IS_E1(bp))
12712 bnx2x_set_eth_mac_addr_e1(bp, 1);
12713 else
12714 bnx2x_set_eth_mac_addr_e1h(bp, 1);
12715 }
12716
12717 return 0;
12718}
12719
12720/* called with rtnl_lock */
12721static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12722 int devad, u16 addr)
12723{
12724 struct bnx2x *bp = netdev_priv(netdev);
12725 u16 value;
12726 int rc;
12727 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12728
12729 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12730 prtad, devad, addr);
12731
12732 if (prtad != bp->mdio.prtad) {
12733 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12734 prtad, bp->mdio.prtad);
12735 return -EINVAL;
12736 }
12737
12738 /* The HW expects different devad if CL22 is used */
12739 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12740
12741 bnx2x_acquire_phy_lock(bp);
12742 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12743 devad, addr, &value);
12744 bnx2x_release_phy_lock(bp);
12745 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12746
12747 if (!rc)
12748 rc = value;
12749 return rc;
12750}
12751
12752/* called with rtnl_lock */
12753static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12754 u16 addr, u16 value)
12755{
12756 struct bnx2x *bp = netdev_priv(netdev);
12757 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12758 int rc;
12759
12760 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12761 " value 0x%x\n", prtad, devad, addr, value);
12762
12763 if (prtad != bp->mdio.prtad) {
12764 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12765 prtad, bp->mdio.prtad);
12766 return -EINVAL;
12767 }
12768
12769 /* The HW expects different devad if CL22 is used */
12770 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12771
12772 bnx2x_acquire_phy_lock(bp);
12773 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12774 devad, addr, value);
12775 bnx2x_release_phy_lock(bp);
12776 return rc;
12777}
12778
12779/* called with rtnl_lock */
12780static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12781{
12782 struct bnx2x *bp = netdev_priv(dev);
12783 struct mii_ioctl_data *mdio = if_mii(ifr);
12784
12785 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12786 mdio->phy_id, mdio->reg_num, mdio->val_in);
12787
12788 if (!netif_running(dev))
12789 return -EAGAIN;
12790
12791 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12792}
12793
12794/* called with rtnl_lock */
12795static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12796{
12797 struct bnx2x *bp = netdev_priv(dev);
12798 int rc = 0;
12799
12800 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12801 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12802 return -EAGAIN;
12803 }
12804
12805 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12806 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12807 return -EINVAL;
12808
12809 /* This does not race with packet allocation
12810 * because the actual alloc size is
12811 * only updated as part of load
12812 */
12813 dev->mtu = new_mtu;
12814
12815 if (netif_running(dev)) {
12816 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12817 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12818 }
12819
12820 return rc;
12821}
12822
12823static void bnx2x_tx_timeout(struct net_device *dev)
12824{
12825 struct bnx2x *bp = netdev_priv(dev);
12826
12827#ifdef BNX2X_STOP_ON_ERROR
12828 if (!bp->panic)
12829 bnx2x_panic();
12830#endif
12831 /* This allows the netif to be shutdown gracefully before resetting */
12832 schedule_delayed_work(&bp->reset_task, 0);
12833}
12834
12835#ifdef BCM_VLAN
12836/* called with rtnl_lock */
12837static void bnx2x_vlan_rx_register(struct net_device *dev,
12838 struct vlan_group *vlgrp)
12839{
12840 struct bnx2x *bp = netdev_priv(dev);
12841
12842 bp->vlgrp = vlgrp;
12843
12844 /* Set flags according to the required capabilities */
12845 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12846
12847 if (dev->features & NETIF_F_HW_VLAN_TX)
12848 bp->flags |= HW_VLAN_TX_FLAG;
12849
12850 if (dev->features & NETIF_F_HW_VLAN_RX)
12851 bp->flags |= HW_VLAN_RX_FLAG;
12852
12853 if (netif_running(dev))
12854 bnx2x_set_client_config(bp);
12855}
12856
12857#endif
12858
12859#ifdef CONFIG_NET_POLL_CONTROLLER
12860static void poll_bnx2x(struct net_device *dev)
12861{
12862 struct bnx2x *bp = netdev_priv(dev);
12863
12864 disable_irq(bp->pdev->irq);
12865 bnx2x_interrupt(bp->pdev->irq, dev);
12866 enable_irq(bp->pdev->irq);
12867}
12868#endif
12869
12870static const struct net_device_ops bnx2x_netdev_ops = {
12871 .ndo_open = bnx2x_open,
12872 .ndo_stop = bnx2x_close,
12873 .ndo_start_xmit = bnx2x_start_xmit,
12874 .ndo_set_multicast_list = bnx2x_set_rx_mode,
12875 .ndo_set_mac_address = bnx2x_change_mac_addr,
12876 .ndo_validate_addr = eth_validate_addr,
12877 .ndo_do_ioctl = bnx2x_ioctl,
12878 .ndo_change_mtu = bnx2x_change_mtu,
12879 .ndo_tx_timeout = bnx2x_tx_timeout,
12880#ifdef BCM_VLAN
12881 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12882#endif
12883#ifdef CONFIG_NET_POLL_CONTROLLER
12884 .ndo_poll_controller = poll_bnx2x,
12885#endif
12886};
12887
12888static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12889 struct net_device *dev)
12890{
12891 struct bnx2x *bp;
12892 int rc;
12893
12894 SET_NETDEV_DEV(dev, &pdev->dev);
12895 bp = netdev_priv(dev);
12896
12897 bp->dev = dev;
12898 bp->pdev = pdev;
12899 bp->flags = 0;
12900 bp->func = PCI_FUNC(pdev->devfn);
12901
12902 rc = pci_enable_device(pdev);
12903 if (rc) {
12904 dev_err(&bp->pdev->dev,
12905 "Cannot enable PCI device, aborting\n");
12906 goto err_out;
12907 }
12908
12909 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12910 dev_err(&bp->pdev->dev,
12911 "Cannot find PCI device base address, aborting\n");
12912 rc = -ENODEV;
12913 goto err_out_disable;
12914 }
12915
12916 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12917 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12918 " base address, aborting\n");
12919 rc = -ENODEV;
12920 goto err_out_disable;
12921 }
12922
12923 if (atomic_read(&pdev->enable_cnt) == 1) {
12924 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12925 if (rc) {
12926 dev_err(&bp->pdev->dev,
12927 "Cannot obtain PCI resources, aborting\n");
12928 goto err_out_disable;
12929 }
12930
12931 pci_set_master(pdev);
12932 pci_save_state(pdev);
12933 }
12934
12935 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12936 if (bp->pm_cap == 0) {
12937 dev_err(&bp->pdev->dev,
12938 "Cannot find power management capability, aborting\n");
12939 rc = -EIO;
12940 goto err_out_release;
12941 }
12942
12943 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12944 if (bp->pcie_cap == 0) {
12945 dev_err(&bp->pdev->dev,
12946 "Cannot find PCI Express capability, aborting\n");
12947 rc = -EIO;
12948 goto err_out_release;
12949 }
12950
12951 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12952 bp->flags |= USING_DAC_FLAG;
12953 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12954 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12955 " failed, aborting\n");
12956 rc = -EIO;
12957 goto err_out_release;
12958 }
12959
12960 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12961 dev_err(&bp->pdev->dev,
12962 "System does not support DMA, aborting\n");
12963 rc = -EIO;
12964 goto err_out_release;
12965 }
12966
12967 dev->mem_start = pci_resource_start(pdev, 0);
12968 dev->base_addr = dev->mem_start;
12969 dev->mem_end = pci_resource_end(pdev, 0);
12970
12971 dev->irq = pdev->irq;
12972
12973 bp->regview = pci_ioremap_bar(pdev, 0);
12974 if (!bp->regview) {
12975 dev_err(&bp->pdev->dev,
12976 "Cannot map register space, aborting\n");
12977 rc = -ENOMEM;
12978 goto err_out_release;
12979 }
12980
12981 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12982 min_t(u64, BNX2X_DB_SIZE,
12983 pci_resource_len(pdev, 2)));
12984 if (!bp->doorbells) {
12985 dev_err(&bp->pdev->dev,
12986 "Cannot map doorbell space, aborting\n");
12987 rc = -ENOMEM;
12988 goto err_out_unmap;
12989 }
12990
12991 bnx2x_set_power_state(bp, PCI_D0);
12992
12993 /* clean indirect addresses */
12994 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12995 PCICFG_VENDOR_ID_OFFSET);
12996 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12997 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12998 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12999 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
13000
13001 /* Reset the load counter */
13002 bnx2x_clear_load_cnt(bp);
13003
13004 dev->watchdog_timeo = TX_TIMEOUT;
13005
13006 dev->netdev_ops = &bnx2x_netdev_ops;
13007 dev->ethtool_ops = &bnx2x_ethtool_ops;
13008 dev->features |= NETIF_F_SG;
13009 dev->features |= NETIF_F_HW_CSUM;
13010 if (bp->flags & USING_DAC_FLAG)
13011 dev->features |= NETIF_F_HIGHDMA;
13012 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13013 dev->features |= NETIF_F_TSO6;
13014#ifdef BCM_VLAN
13015 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13016 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13017
13018 dev->vlan_features |= NETIF_F_SG;
13019 dev->vlan_features |= NETIF_F_HW_CSUM;
13020 if (bp->flags & USING_DAC_FLAG)
13021 dev->vlan_features |= NETIF_F_HIGHDMA;
13022 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13023 dev->vlan_features |= NETIF_F_TSO6;
13024#endif
13025
13026 /* get_port_hwinfo() will set prtad and mmds properly */
13027 bp->mdio.prtad = MDIO_PRTAD_NONE;
13028 bp->mdio.mmds = 0;
13029 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13030 bp->mdio.dev = dev;
13031 bp->mdio.mdio_read = bnx2x_mdio_read;
13032 bp->mdio.mdio_write = bnx2x_mdio_write;
13033
13034 return 0;
13035
13036err_out_unmap:
13037 if (bp->regview) {
13038 iounmap(bp->regview);
13039 bp->regview = NULL;
13040 }
13041 if (bp->doorbells) {
13042 iounmap(bp->doorbells);
13043 bp->doorbells = NULL;
13044 }
13045
13046err_out_release:
13047 if (atomic_read(&pdev->enable_cnt) == 1)
13048 pci_release_regions(pdev);
13049
13050err_out_disable:
13051 pci_disable_device(pdev);
13052 pci_set_drvdata(pdev, NULL);
13053
13054err_out:
13055 return rc;
13056}
13057
13058static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13059 int *width, int *speed)
13060{
13061 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13062
13063 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13064
13065 /* return value of 1=2.5GHz 2=5GHz */
13066 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13067}
13068
13069static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13070{
13071 const struct firmware *firmware = bp->firmware;
13072 struct bnx2x_fw_file_hdr *fw_hdr;
13073 struct bnx2x_fw_file_section *sections;
13074 u32 offset, len, num_ops;
13075 u16 *ops_offsets;
13076 int i;
13077 const u8 *fw_ver;
13078
13079 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13080 return -EINVAL;
13081
13082 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13083 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13084
13085 /* Make sure none of the offsets and sizes make us read beyond
13086 * the end of the firmware data */
13087 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13088 offset = be32_to_cpu(sections[i].offset);
13089 len = be32_to_cpu(sections[i].len);
13090 if (offset + len > firmware->size) {
13091 dev_err(&bp->pdev->dev,
13092 "Section %d length is out of bounds\n", i);
13093 return -EINVAL;
13094 }
13095 }
13096
13097 /* Likewise for the init_ops offsets */
13098 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13099 ops_offsets = (u16 *)(firmware->data + offset);
13100 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13101
13102 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13103 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13104 dev_err(&bp->pdev->dev,
13105 "Section offset %d is out of bounds\n", i);
13106 return -EINVAL;
13107 }
13108 }
13109
13110 /* Check FW version */
13111 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13112 fw_ver = firmware->data + offset;
13113 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13114 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13115 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13116 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13117 dev_err(&bp->pdev->dev,
13118 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13119 fw_ver[0], fw_ver[1], fw_ver[2],
13120 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13121 BCM_5710_FW_MINOR_VERSION,
13122 BCM_5710_FW_REVISION_VERSION,
13123 BCM_5710_FW_ENGINEERING_VERSION);
13124 return -EINVAL;
13125 }
13126
13127 return 0;
13128}
13129
13130static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13131{
13132 const __be32 *source = (const __be32 *)_source;
13133 u32 *target = (u32 *)_target;
13134 u32 i;
13135
13136 for (i = 0; i < n/4; i++)
13137 target[i] = be32_to_cpu(source[i]);
13138}
13139
13140/*
13141 Ops array is stored in the following format:
13142 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13143 */
13144static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13145{
13146 const __be32 *source = (const __be32 *)_source;
13147 struct raw_op *target = (struct raw_op *)_target;
13148 u32 i, j, tmp;
13149
13150 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13151 tmp = be32_to_cpu(source[j]);
13152 target[i].op = (tmp >> 24) & 0xff;
13153 target[i].offset = tmp & 0xffffff;
13154 target[i].raw_data = be32_to_cpu(source[j + 1]);
13155 }
13156}
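/* Example: the big-endian pair { 0x10001234, 0xdeadbeef } decodes to
 * op = 0x10, offset = 0x001234, raw_data = 0xdeadbeef.
 */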
13157
13158static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13159{
13160 const __be16 *source = (const __be16 *)_source;
13161 u16 *target = (u16 *)_target;
13162 u32 i;
13163
13164 for (i = 0; i < n/2; i++)
13165 target[i] = be16_to_cpu(source[i]);
13166}
13167
13168#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13169do { \
13170 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13171 bp->arr = kmalloc(len, GFP_KERNEL); \
13172 if (!bp->arr) { \
13173 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13174 goto lbl; \
13175 } \
13176 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13177 (u8 *)bp->arr, len); \
13178} while (0)
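/* Typical use, as below: BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) allocates bp->init_data,
 * byte-swaps the matching firmware-file section into it and jumps to
 * the label on allocation failure.
 */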
13179
13180static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13181{
13182 const char *fw_file_name;
13183 struct bnx2x_fw_file_hdr *fw_hdr;
13184 int rc;
13185
13186 if (CHIP_IS_E1(bp))
13187 fw_file_name = FW_FILE_NAME_E1;
13188 else if (CHIP_IS_E1H(bp))
13189 fw_file_name = FW_FILE_NAME_E1H;
13190 else {
13191 dev_err(dev, "Unsupported chip revision\n");
13192 return -EINVAL;
13193 }
13194
13195 dev_info(dev, "Loading %s\n", fw_file_name);
13196
13197 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13198 if (rc) {
13199 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13200 goto request_firmware_exit;
13201 }
13202
13203 rc = bnx2x_check_firmware(bp);
13204 if (rc) {
cdaa7cb8 13205 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
13206 goto request_firmware_exit;
13207 }
13208
13209 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13210
13211 /* Initialize the pointers to the init arrays */
13212 /* Blob */
13213 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13214
13215 /* Opcodes */
13216 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13217
13218 /* Offsets */
13219 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13220 be16_to_cpu_n);
13221
13222 /* STORMs firmware */
13223 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13224 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13225 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13226 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13227 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13228 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13229 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13230 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13231 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13232 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13233 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13234 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13235 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13236 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13237 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13238 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13239
13240 return 0;
13241
13242init_offsets_alloc_err:
13243 kfree(bp->init_ops);
13244init_ops_alloc_err:
13245 kfree(bp->init_data);
13246request_firmware_exit:
13247 release_firmware(bp->firmware);
13248
13249 return rc;
13250}
13251
13252
13253static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13254 const struct pci_device_id *ent)
13255{
13256 struct net_device *dev = NULL;
13257 struct bnx2x *bp;
13258 int pcie_width, pcie_speed;
13259 int rc;
13260
13261 /* dev zeroed in init_etherdev */
13262 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13263 if (!dev) {
13264 dev_err(&pdev->dev, "Cannot allocate net device\n");
13265 return -ENOMEM;
13266 }
13267
13268 bp = netdev_priv(dev);
13269 bp->msg_enable = debug;
13270
13271 pci_set_drvdata(pdev, dev);
13272
13273 rc = bnx2x_init_dev(pdev, dev);
13274 if (rc < 0) {
13275 free_netdev(dev);
13276 return rc;
13277 }
13278
13279 rc = bnx2x_init_bp(bp);
13280 if (rc)
13281 goto init_one_exit;
13282
13283 /* Set init arrays */
13284 rc = bnx2x_init_firmware(bp, &pdev->dev);
13285 if (rc) {
13286 dev_err(&pdev->dev, "Error loading firmware\n");
13287 goto init_one_exit;
13288 }
13289
13290 rc = register_netdev(dev);
13291 if (rc) {
13292 dev_err(&pdev->dev, "Cannot register net device\n");
13293 goto init_one_exit;
13294 }
13295
13296 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13297 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13298 " IRQ %d, ", board_info[ent->driver_data].name,
13299 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13300 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13301 dev->base_addr, bp->pdev->irq);
13302 pr_cont("node addr %pM\n", dev->dev_addr);
13303
13304 return 0;
13305
13306init_one_exit:
13307 if (bp->regview)
13308 iounmap(bp->regview);
13309
13310 if (bp->doorbells)
13311 iounmap(bp->doorbells);
13312
13313 free_netdev(dev);
13314
13315 if (atomic_read(&pdev->enable_cnt) == 1)
13316 pci_release_regions(pdev);
13317
13318 pci_disable_device(pdev);
13319 pci_set_drvdata(pdev, NULL);
13320
13321 return rc;
13322}
13323
13324static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13325{
13326 struct net_device *dev = pci_get_drvdata(pdev);
13327 struct bnx2x *bp;
13328
13329 if (!dev) {
13330 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13331 return;
13332 }
13333 bp = netdev_priv(dev);
13334
13335 unregister_netdev(dev);
13336
13337 /* Make sure RESET task is not scheduled before continuing */
13338 cancel_delayed_work_sync(&bp->reset_task);
13339
13340 kfree(bp->init_ops_offsets);
13341 kfree(bp->init_ops);
13342 kfree(bp->init_data);
13343 release_firmware(bp->firmware);
13344
13345 if (bp->regview)
13346 iounmap(bp->regview);
13347
13348 if (bp->doorbells)
13349 iounmap(bp->doorbells);
13350
13351 free_netdev(dev);
13352
13353 if (atomic_read(&pdev->enable_cnt) == 1)
13354 pci_release_regions(pdev);
13355
a2fbb9ea
ET
13356 pci_disable_device(pdev);
13357 pci_set_drvdata(pdev, NULL);
13358}
13359
13360static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13361{
13362 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
13363 struct bnx2x *bp;
13364
13365 if (!dev) {
13366 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13367 return -ENODEV;
13368 }
13369 bp = netdev_priv(dev);
13370
13371 rtnl_lock();
13372
13373 pci_save_state(pdev);
13374
13375 if (!netif_running(dev)) {
13376 rtnl_unlock();
13377 return 0;
13378 }
13379
13380 netif_device_detach(dev);
13381
13382 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13383
13384 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13385
13386 rtnl_unlock();
13387
13388 return 0;
13389}
13390
13391static int bnx2x_resume(struct pci_dev *pdev)
13392{
13393 struct net_device *dev = pci_get_drvdata(pdev);
13394 struct bnx2x *bp;
13395 int rc;
13396
13397 if (!dev) {
13398 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13399 return -ENODEV;
13400 }
13401 bp = netdev_priv(dev);
13402
13403 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13404 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13405 return -EAGAIN;
13406 }
13407
13408 rtnl_lock();
13409
13410 pci_restore_state(pdev);
13411
13412 if (!netif_running(dev)) {
13413 rtnl_unlock();
13414 return 0;
13415 }
13416
13417 bnx2x_set_power_state(bp, PCI_D0);
13418 netif_device_attach(dev);
13419
13420 rc = bnx2x_nic_load(bp, LOAD_OPEN);
13421
13422 rtnl_unlock();
13423
13424 return rc;
13425}
13426
13427static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13428{
13429 int i;
13430
13431 bp->state = BNX2X_STATE_ERROR;
13432
13433 bp->rx_mode = BNX2X_RX_MODE_NONE;
13434
13435 bnx2x_netif_stop(bp, 0);
13436 netif_carrier_off(bp->dev);
13437
13438 del_timer_sync(&bp->timer);
13439 bp->stats_state = STATS_STATE_DISABLED;
13440 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13441
13442 /* Release IRQs */
13443 bnx2x_free_irq(bp, false);
13444
13445 if (CHIP_IS_E1(bp)) {
13446 struct mac_configuration_cmd *config =
13447 bnx2x_sp(bp, mcast_config);
13448
13449 for (i = 0; i < config->hdr.length; i++)
13450 CAM_INVALIDATE(config->config_table[i]);
13451 }
13452
13453 /* Free SKBs, SGEs, TPA pool and driver internals */
13454 bnx2x_free_skbs(bp);
13455 for_each_queue(bp, i)
13456 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13457 for_each_queue(bp, i)
13458 netif_napi_del(&bnx2x_fp(bp, i, napi));
13459 bnx2x_free_mem(bp);
13460
13461 bp->state = BNX2X_STATE_CLOSED;
13462
13463 return 0;
13464}
13465
13466static void bnx2x_eeh_recover(struct bnx2x *bp)
13467{
13468 u32 val;
13469
13470 mutex_init(&bp->port.phy_mutex);
13471
13472 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13473 bp->link_params.shmem_base = bp->common.shmem_base;
13474 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13475
13476 if (!bp->common.shmem_base ||
13477 (bp->common.shmem_base < 0xA0000) ||
13478 (bp->common.shmem_base >= 0xC0000)) {
13479 BNX2X_DEV_INFO("MCP not active\n");
13480 bp->flags |= NO_MCP_FLAG;
13481 return;
13482 }
13483
13484 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13485 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13486 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13487 BNX2X_ERR("BAD MCP validity signature\n");
13488
13489 if (!BP_NOMCP(bp)) {
13490 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13491 & DRV_MSG_SEQ_NUMBER_MASK);
13492 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13493 }
13494}
13495
13496/**
13497 * bnx2x_io_error_detected - called when PCI error is detected
13498 * @pdev: Pointer to PCI device
13499 * @state: The current pci connection state
13500 *
13501 * This function is called after a PCI bus error affecting
13502 * this device has been detected.
13503 */
13504static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13505 pci_channel_state_t state)
13506{
13507 struct net_device *dev = pci_get_drvdata(pdev);
13508 struct bnx2x *bp = netdev_priv(dev);
13509
13510 rtnl_lock();
13511
13512 netif_device_detach(dev);
13513
13514 if (state == pci_channel_io_perm_failure) {
13515 rtnl_unlock();
13516 return PCI_ERS_RESULT_DISCONNECT;
13517 }
13518
13519 if (netif_running(dev))
13520 bnx2x_eeh_nic_unload(bp);
13521
13522 pci_disable_device(pdev);
13523
13524 rtnl_unlock();
13525
13526 /* Request a slot reset */
13527 return PCI_ERS_RESULT_NEED_RESET;
13528}
13529
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

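/*
 * PCI error recovery proceeds in three stages: error_detected() quiesces
 * the device and asks for a slot reset, slot_reset() re-enables it after
 * the bus reset, and resume() reloads the NIC once the recovery core
 * says traffic may flow again.
 */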
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

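/*
 * The slow-path workqueue is created before the PCI driver is
 * registered so that a probed device can queue work on it at any
 * time; a failed registration must therefore destroy it again.
 */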
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

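	/*
	 * Refill the slow-path queue (SPQ) from the CNIC backlog ring:
	 * keep posting queued kwqes while SPQ credit is available and
	 * the backlog is non-empty, then publish the new producer.
	 */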
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

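	/*
	 * Copy as many kwqes as fit into the backlog ring; the return
	 * value tells the CNIC module how many were accepted so it can
	 * retry the remainder later.
	 */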
	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

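/*
 * Same as bnx2x_cnic_ctl_send() but safe to call from bottom-half
 * context: the ops pointer is sampled under RCU instead of taking
 * the cnic mutex.
 */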
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

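/*
 * Control hook called by the CNIC module: dispatches context-table
 * writes, slow-path completion accounting and L2 client start/stop
 * requests.
 */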
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

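/*
 * Describe the interrupt resources handed to CNIC: with MSI-X, table
 * entry 1 is dedicated to the CNIC status block; the second irq_arr
 * entry always points at the default status block.
 */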
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

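/*
 * Called by the CNIC module to attach to this device: allocates a
 * one-page backlog ring for kwqes, initializes the CNIC status block,
 * programs the iSCSI MAC and finally publishes the ops pointer under
 * RCU so it becomes visible to the notification paths.
 */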
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

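/*
 * Entry point used by the CNIC module to discover this device: fills
 * in the cnic_eth_dev descriptor (register windows, ILT layout, CID
 * range and driver callbacks) that CNIC uses to drive the hardware.
 */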
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */