/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.53-1"
#define DRV_MODULE_RELDATE	"2010/18/04"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
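
/*
 * FW_FILE_VERSION glues the four firmware version components into a dotted
 * string at preprocessing time, so the request_firmware() name becomes
 * e.g. "bnx2x-e1-5.2.13.0.fw" (the numbers here are only an illustration;
 * the real values come from the BCM_5710_FW_* macros).
 */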

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
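
/*
 * Both indirect accessors work through the PCI config-space GRC window:
 * the target GRC address is latched via PCICFG_GRC_ADDRESS, the data moves
 * through PCICFG_GRC_DATA, and the window is then parked back at
 * PCICFG_VENDOR_ID_OFFSET, a harmless default.
 */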

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
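
/*
 * The DMAE engine exposes 16 command slots (one GO register each in
 * dmae_reg_go_c[]); a command is posted by copying the dmae_command
 * structure dword-by-dword into the slot's command memory and then
 * writing 1 to that slot's GO register.
 */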

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
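
/*
 * Note the units above: DMAE lengths are counted in 32-bit dwords, so each
 * chunk advances the byte offset by dmae_wr_max * 4 while len itself drops
 * by dmae_wr_max; the tail shorter than the chunk limit is written last.
 */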

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
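
/*
 * All four STORM assert lists share one layout: 16-byte entries read back
 * as four dwords and scanned until an entry whose first dword holds the
 * invalid-opcode marker; rc counts how many live asserts were printed.
 */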

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

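/*
 * Host coalescing (HC) interrupt configuration.  bnx2x_int_enable() selects
 * the enable bits matching the active mode (MSI-X, MSI or INT#x); note that
 * in the INT#x case the config register is deliberately written twice:
 * first with the MSI/MSI-X enable bit still set, then without it.
 */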
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if we succeeded in acquiring the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
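
/*
 * The HW lock is a set-and-test register pair: writing the resource bit to
 * the control register's "set" half (+4) attempts the grab, and reading the
 * control register back tells us whether this function actually owns it.
 */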

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
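
/*
 * bnx2x_tx_avail() treats the NUM_TX_RINGS "next page" BDs as permanently
 * consumed on top of the prod-cons distance, so the value it returns stays
 * a conservative estimate of the BDs a new frame may still claim.
 */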

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags, cqe_fp_status_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

c68ed255
TH
1731 if ((bp->dev->features & ETH_FLAG_RXHASH) &&
1732 (cqe_fp_status_flags &
1733 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1734 skb->rxhash = le32_to_cpu(
1735 cqe->fast_path_cqe.rss_hash_result);
1736
a2fbb9ea 1737 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1738 if (bp->rx_csum) {
1adcd8be
EG
1739 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1740 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1741 else
de832a55 1742 fp->eth_q_stats.hw_csum_err++;
66e855f3 1743 }
a2fbb9ea
ET
1744 }
1745
748e5439 1746 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1747
a2fbb9ea 1748#ifdef BCM_VLAN
0c6671b0 1749 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1750 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1751 PARSING_FLAGS_VLAN))
4fd89b7a
DK
1752 vlan_gro_receive(&fp->napi, bp->vlgrp,
1753 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
a2fbb9ea
ET
1754 else
1755#endif
4fd89b7a 1756 napi_gro_receive(&fp->napi, skb);
a2fbb9ea 1757
a2fbb9ea
ET
1758
1759next_rx:
1760 rx_buf->skb = NULL;
1761
1762 bd_cons = NEXT_RX_IDX(bd_cons);
1763 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1764 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1765 rx_pkt++;
a2fbb9ea
ET
1766next_cqe:
1767 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1768 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1769
34f80b04 1770 if (rx_pkt == budget)
a2fbb9ea
ET
1771 break;
1772 } /* while */
1773
1774 fp->rx_bd_cons = bd_cons;
34f80b04 1775 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1776 fp->rx_comp_cons = sw_comp_cons;
1777 fp->rx_comp_prod = sw_comp_prod;
1778
7a9b2557
VZ
1779 /* Update producers */
1780 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1781 fp->rx_sge_prod);
a2fbb9ea
ET
1782
1783 fp->rx_pkt += rx_pkt;
1784 fp->rx_calls++;
1785
1786 return rx_pkt;
1787}
1788
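The TPA branch above keys entirely off two flag bits: a CQE carrying both TPA_START and TPA_END is treated as an ordinary completion, START alone opens an aggregation queue, and END alone closes it. A standalone sketch of that classification; the bit values here are hypothetical, the real ones come from the TPA_TYPE() macro in bnx2x.h:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical bit layout, for illustration only; the driver's real
 * masks come from the TPA_TYPE() macro in bnx2x.h. */
#define TPA_TYPE_START	0x1
#define TPA_TYPE_END	0x2
#define TPA_TYPE(flags)	((flags) & (TPA_TYPE_START | TPA_TYPE_END))

static const char *classify_cqe(uint8_t flags)
{
	uint8_t t = TPA_TYPE(flags);

	/* START and END together mark a plain, non-TPA completion */
	if (t == (TPA_TYPE_START | TPA_TYPE_END))
		return "regular packet";
	if (t == TPA_TYPE_START)
		return "tpa_start";
	if (t == TPA_TYPE_END)
		return "tpa_stop";
	return "regular packet";
}

int main(void)
{
	printf("%s\n", classify_cqe(TPA_TYPE_START));			/* tpa_start */
	printf("%s\n", classify_cqe(TPA_TYPE_START | TPA_TYPE_END));	/* regular packet */
	return 0;
}
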
1789static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1790{
1791 struct bnx2x_fastpath *fp = fp_cookie;
1792 struct bnx2x *bp = fp->bp;
a2fbb9ea 1793
da5a662a
VZ
1794 /* Return here if interrupt is disabled */
1795 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1796 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1797 return IRQ_HANDLED;
1798 }
1799
34f80b04 1800 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1801 fp->index, fp->sb_id);
0626b899 1802 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1803
1804#ifdef BNX2X_STOP_ON_ERROR
1805 if (unlikely(bp->panic))
1806 return IRQ_HANDLED;
1807#endif
ca00392c 1808
54b9ddaa
VZ
1809 /* Handle Rx and Tx according to MSI-X vector */
1810 prefetch(fp->rx_cons_sb);
1811 prefetch(fp->tx_cons_sb);
1812 prefetch(&fp->status_blk->u_status_block.status_block_index);
1813 prefetch(&fp->status_blk->c_status_block.status_block_index);
1814 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1815
a2fbb9ea
ET
1816 return IRQ_HANDLED;
1817}
1818
1819static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1820{
555f6c78 1821 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1822 u16 status = bnx2x_ack_int(bp);
34f80b04 1823 u16 mask;
ca00392c 1824 int i;
a2fbb9ea 1825
34f80b04 1826 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1827 if (unlikely(status == 0)) {
1828 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1829 return IRQ_NONE;
1830 }
f5372251 1831 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1832
34f80b04 1833 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1834 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1835 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1836 return IRQ_HANDLED;
1837 }
1838
3196a88a
EG
1839#ifdef BNX2X_STOP_ON_ERROR
1840 if (unlikely(bp->panic))
1841 return IRQ_HANDLED;
1842#endif
1843
ca00392c
EG
1844 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1845 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1846
ca00392c
EG
1847 mask = 0x2 << fp->sb_id;
1848 if (status & mask) {
54b9ddaa
VZ
1849 /* Handle Rx and Tx according to SB id */
1850 prefetch(fp->rx_cons_sb);
1851 prefetch(&fp->status_blk->u_status_block.
1852 status_block_index);
1853 prefetch(fp->tx_cons_sb);
1854 prefetch(&fp->status_blk->c_status_block.
1855 status_block_index);
1856 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1857 status &= ~mask;
1858 }
a2fbb9ea
ET
1859 }
1860
993ac7b5
MC
1861#ifdef BCM_CNIC
1862 mask = 0x2 << CNIC_SB_ID(bp);
1863 if (status & (mask | 0x1)) {
1864 struct cnic_ops *c_ops = NULL;
1865
1866 rcu_read_lock();
1867 c_ops = rcu_dereference(bp->cnic_ops);
1868 if (c_ops)
1869 c_ops->cnic_handler(bp->cnic_data, NULL);
1870 rcu_read_unlock();
1871
1872 status &= ~mask;
1873 }
1874#endif
a2fbb9ea 1875
34f80b04 1876 if (unlikely(status & 0x1)) {
1cf167f2 1877 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1878
1879 status &= ~0x1;
1880 if (!status)
1881 return IRQ_HANDLED;
1882 }
1883
cdaa7cb8
VZ
1884 if (unlikely(status))
1885 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1886 status);
a2fbb9ea 1887
c18487ee 1888 return IRQ_HANDLED;
a2fbb9ea
ET
1889}
1890
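The per-queue demux in bnx2x_interrupt() is pure bit arithmetic: fastpath queue sb_id owns status bit (0x2 << sb_id), bit 0 signals slowpath work, and anything left over is unexpected. A minimal userspace model of that loop, with an illustrative queue count:

#include <stdio.h>
#include <stdint.h>

/* Userspace model of the INTx status demux: bit 0 is the slowpath
 * indication, bit (1 + sb_id) belongs to fastpath queue sb_id. */
static void demux_status(uint16_t status, int num_queues)
{
	int sb_id;

	for (sb_id = 0; sb_id < num_queues; sb_id++) {
		uint16_t mask = 0x2 << sb_id;

		if (status & mask) {
			printf("schedule NAPI for queue %d\n", sb_id);
			status &= ~mask;
		}
	}
	if (status & 0x1) {
		printf("queue the slowpath task\n");
		status &= ~0x1;
	}
	if (status)
		printf("unknown interrupt! (status 0x%x)\n", status);
}

int main(void)
{
	demux_status(0x5, 4);	/* queue 1 plus slowpath work */
	return 0;
}
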
c18487ee 1891/* end of fast path */
a2fbb9ea 1892
bb2a0f7a 1893static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1894
c18487ee
YR
1895/* Link */
1896
1897/*
1898 * General service functions
1899 */
a2fbb9ea 1900
4a37fb66 1901static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1902{
1903 u32 lock_status;
1904 u32 resource_bit = (1 << resource);
4a37fb66
YG
1905 int func = BP_FUNC(bp);
1906 u32 hw_lock_control_reg;
c18487ee 1907 int cnt;
a2fbb9ea 1908
c18487ee
YR
1909 /* Validating that the resource is within range */
1910 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1911 DP(NETIF_MSG_HW,
1912 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1913 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1914 return -EINVAL;
1915 }
a2fbb9ea 1916
4a37fb66
YG
1917 if (func <= 5) {
1918 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1919 } else {
1920 hw_lock_control_reg =
1921 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1922 }
1923
c18487ee 1924 /* Validating that the resource is not already taken */
4a37fb66 1925 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1926 if (lock_status & resource_bit) {
1927 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1928 lock_status, resource_bit);
1929 return -EEXIST;
1930 }
a2fbb9ea 1931
46230476
EG
 1932	/* Try for 5 seconds, polling every 5ms */
1933 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1934 /* Try to acquire the lock */
4a37fb66
YG
1935 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1936 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1937 if (lock_status & resource_bit)
1938 return 0;
a2fbb9ea 1939
c18487ee 1940 msleep(5);
a2fbb9ea 1941 }
c18487ee
YR
1942 DP(NETIF_MSG_HW, "Timeout\n");
1943 return -EAGAIN;
1944}
a2fbb9ea 1945
4a37fb66 1946static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1947{
1948 u32 lock_status;
1949 u32 resource_bit = (1 << resource);
4a37fb66
YG
1950 int func = BP_FUNC(bp);
1951 u32 hw_lock_control_reg;
a2fbb9ea 1952
72fd0718
VZ
1953 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1954
c18487ee
YR
1955 /* Validating that the resource is within range */
1956 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1957 DP(NETIF_MSG_HW,
1958 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1959 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1960 return -EINVAL;
1961 }
1962
4a37fb66
YG
1963 if (func <= 5) {
1964 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1965 } else {
1966 hw_lock_control_reg =
1967 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1968 }
1969
c18487ee 1970 /* Validating that the resource is currently taken */
4a37fb66 1971 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1972 if (!(lock_status & resource_bit)) {
1973 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1974 lock_status, resource_bit);
1975 return -EFAULT;
a2fbb9ea
ET
1976 }
1977
4a37fb66 1978 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1979 return 0;
1980}
1981
1982/* HW Lock for shared dual port PHYs */
4a37fb66 1983static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1984{
34f80b04 1985 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1986
46c6a674
EG
1987 if (bp->port.need_hw_lock)
1988 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1989}
a2fbb9ea 1990
4a37fb66 1991static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1992{
46c6a674
EG
1993 if (bp->port.need_hw_lock)
1994 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1995
34f80b04 1996 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1997}
a2fbb9ea 1998
4acac6a5
EG
1999int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2000{
2001 /* The GPIO should be swapped if swap register is set and active */
2002 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2003 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2004 int gpio_shift = gpio_num +
2005 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2006 u32 gpio_mask = (1 << gpio_shift);
2007 u32 gpio_reg;
2008 int value;
2009
2010 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2011 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2012 return -EINVAL;
2013 }
2014
2015 /* read GPIO value */
2016 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2017
2018 /* get the requested pin value */
2019 if ((gpio_reg & gpio_mask) == gpio_mask)
2020 value = 1;
2021 else
2022 value = 0;
2023
2024 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2025
2026 return value;
2027}
2028
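The port-swap handling in the GPIO helpers reduces to one line of pin arithmetic. A standalone sketch, assuming a port shift of 4 (the real constant lives in the MISC register definitions):

#include <stdio.h>

/* Assumed value, for illustration; the driver's real constant is
 * MISC_REGISTERS_GPIO_PORT_SHIFT. */
#define PORT_SHIFT	4

/* Model of the pin arithmetic in bnx2x_get_gpio()/bnx2x_set_gpio():
 * when the port-swap strap is set and overridden, the effective port
 * flips, and port-1 pins sit PORT_SHIFT bits above port-0 pins. */
static int gpio_shift(int gpio_num, int port, int swap, int swap_override)
{
	int gpio_port = (swap && swap_override) ^ port;

	return gpio_num + (gpio_port ? PORT_SHIFT : 0);
}

int main(void)
{
	printf("pin 2, port 1, no swap -> shift %d\n", gpio_shift(2, 1, 0, 0)); /* 6 */
	printf("pin 2, port 1, swapped -> shift %d\n", gpio_shift(2, 1, 1, 1)); /* 2 */
	return 0;
}
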
17de50b7 2029int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2030{
2031 /* The GPIO should be swapped if swap register is set and active */
2032 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2033 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2034 int gpio_shift = gpio_num +
2035 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2036 u32 gpio_mask = (1 << gpio_shift);
2037 u32 gpio_reg;
a2fbb9ea 2038
c18487ee
YR
2039 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2040 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2041 return -EINVAL;
2042 }
a2fbb9ea 2043
4a37fb66 2044 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2045 /* read GPIO and mask except the float bits */
2046 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2047
c18487ee
YR
2048 switch (mode) {
2049 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2050 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2051 gpio_num, gpio_shift);
2052 /* clear FLOAT and set CLR */
2053 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2054 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2055 break;
a2fbb9ea 2056
c18487ee
YR
2057 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2058 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2059 gpio_num, gpio_shift);
2060 /* clear FLOAT and set SET */
2061 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2062 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2063 break;
a2fbb9ea 2064
17de50b7 2065 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2066 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2067 gpio_num, gpio_shift);
2068 /* set FLOAT */
2069 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2070 break;
a2fbb9ea 2071
c18487ee
YR
2072 default:
2073 break;
a2fbb9ea
ET
2074 }
2075
c18487ee 2076 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2077 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2078
c18487ee 2079 return 0;
a2fbb9ea
ET
2080}
2081
4acac6a5
EG
2082int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2083{
2084 /* The GPIO should be swapped if swap register is set and active */
2085 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2086 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2087 int gpio_shift = gpio_num +
2088 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2089 u32 gpio_mask = (1 << gpio_shift);
2090 u32 gpio_reg;
2091
2092 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2093 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2094 return -EINVAL;
2095 }
2096
2097 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2098 /* read GPIO int */
2099 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2100
2101 switch (mode) {
2102 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2103 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2104 "output low\n", gpio_num, gpio_shift);
2105 /* clear SET and set CLR */
2106 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2107 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108 break;
2109
2110 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2111 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2112 "output high\n", gpio_num, gpio_shift);
2113 /* clear CLR and set SET */
2114 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2115 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2116 break;
2117
2118 default:
2119 break;
2120 }
2121
2122 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2123 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2124
2125 return 0;
2126}
2127
c18487ee 2128static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2129{
c18487ee
YR
2130 u32 spio_mask = (1 << spio_num);
2131 u32 spio_reg;
a2fbb9ea 2132
c18487ee
YR
2133 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2134 (spio_num > MISC_REGISTERS_SPIO_7)) {
2135 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2136 return -EINVAL;
a2fbb9ea
ET
2137 }
2138
4a37fb66 2139 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2140 /* read SPIO and mask except the float bits */
2141 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2142
c18487ee 2143 switch (mode) {
6378c025 2144 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2145 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2146 /* clear FLOAT and set CLR */
2147 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2149 break;
a2fbb9ea 2150
6378c025 2151 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2152 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2153 /* clear FLOAT and set SET */
2154 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2155 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2156 break;
a2fbb9ea 2157
c18487ee
YR
2158 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2159 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2160 /* set FLOAT */
2161 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2162 break;
a2fbb9ea 2163
c18487ee
YR
2164 default:
2165 break;
a2fbb9ea
ET
2166 }
2167
c18487ee 2168 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2169 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2170
a2fbb9ea
ET
2171 return 0;
2172}
2173
c18487ee 2174static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2175{
ad33ea3a
EG
2176 switch (bp->link_vars.ieee_fc &
2177 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2178 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2179 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2180 ADVERTISED_Pause);
2181 break;
356e2385 2182
c18487ee 2183 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2184 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2185 ADVERTISED_Pause);
2186 break;
356e2385 2187
c18487ee 2188 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2189 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2190 break;
356e2385 2191
c18487ee 2192 default:
34f80b04 2193 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2194 ADVERTISED_Pause);
2195 break;
2196 }
2197}
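
The switch in bnx2x_calc_fc_adv() is a pure mapping from the negotiated IEEE pause mode to pause advertising bits. A self-contained model of that mapping; the constant values are illustrative, not the driver's real ones:

#include <stdio.h>
#include <stdint.h>

/* Illustrative constants; the driver's real values come from its MDIO
 * and ethtool headers. */
#define PAUSE_NONE		0x0
#define PAUSE_ASYMMETRIC	0x1
#define PAUSE_BOTH		0x3
#define ADV_PAUSE		(1u << 0)
#define ADV_ASYM_PAUSE		(1u << 1)

/* Mirror of the switch in bnx2x_calc_fc_adv(): translate the
 * negotiated IEEE pause mode into advertising bits. */
static uint32_t fc_to_advertising(uint32_t adv, int ieee_fc)
{
	switch (ieee_fc) {
	case PAUSE_BOTH:
		return adv | ADV_PAUSE | ADV_ASYM_PAUSE;
	case PAUSE_ASYMMETRIC:
		return adv | ADV_ASYM_PAUSE;
	case PAUSE_NONE:
	default:
		return adv & ~(ADV_PAUSE | ADV_ASYM_PAUSE);
	}
}

int main(void)
{
	printf("both -> 0x%x\n", (unsigned)fc_to_advertising(0, PAUSE_BOTH));	/* 0x3 */
	printf("none -> 0x%x\n", (unsigned)fc_to_advertising(0x3, PAUSE_NONE));	/* 0x0 */
	return 0;
}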
f1410647 2198
c18487ee
YR
2199static void bnx2x_link_report(struct bnx2x *bp)
2200{
f34d28ea 2201 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2202 netif_carrier_off(bp->dev);
7995c64e 2203 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2204 return;
2205 }
2206
c18487ee 2207 if (bp->link_vars.link_up) {
35c5f8fe
EG
2208 u16 line_speed;
2209
c18487ee
YR
2210 if (bp->state == BNX2X_STATE_OPEN)
2211 netif_carrier_on(bp->dev);
7995c64e 2212 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2213
35c5f8fe
EG
2214 line_speed = bp->link_vars.line_speed;
2215 if (IS_E1HMF(bp)) {
2216 u16 vn_max_rate;
2217
2218 vn_max_rate =
2219 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2220 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2221 if (vn_max_rate < line_speed)
2222 line_speed = vn_max_rate;
2223 }
7995c64e 2224 pr_cont("%d Mbps ", line_speed);
f1410647 2225
c18487ee 2226 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2227 pr_cont("full duplex");
c18487ee 2228 else
7995c64e 2229 pr_cont("half duplex");
f1410647 2230
c0700f90
DM
2231 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2232 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2233 pr_cont(", receive ");
356e2385
EG
2234 if (bp->link_vars.flow_ctrl &
2235 BNX2X_FLOW_CTRL_TX)
7995c64e 2236 pr_cont("& transmit ");
c18487ee 2237 } else {
7995c64e 2238 pr_cont(", transmit ");
c18487ee 2239 }
7995c64e 2240 pr_cont("flow control ON");
c18487ee 2241 }
7995c64e 2242 pr_cont("\n");
f1410647 2243
c18487ee
YR
2244 } else { /* link_down */
2245 netif_carrier_off(bp->dev);
7995c64e 2246 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2247 }
c18487ee
YR
2248}
2249
b5bf9068 2250static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2251{
19680c48
EG
2252 if (!BP_NOMCP(bp)) {
2253 u8 rc;
a2fbb9ea 2254
19680c48 2255 /* Initialize link parameters structure variables */
8c99e7b0
YR
2256 /* It is recommended to turn off RX FC for jumbo frames
2257 for better performance */
0c593270 2258 if (bp->dev->mtu > 5000)
c0700f90 2259 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2260 else
c0700f90 2261 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2262
4a37fb66 2263 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2264
2265 if (load_mode == LOAD_DIAG)
2266 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2267
19680c48 2268 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2269
4a37fb66 2270 bnx2x_release_phy_lock(bp);
a2fbb9ea 2271
3c96c68b
EG
2272 bnx2x_calc_fc_adv(bp);
2273
b5bf9068
EG
2274 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2275 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2276 bnx2x_link_report(bp);
b5bf9068 2277 }
34f80b04 2278
19680c48
EG
2279 return rc;
2280 }
f5372251 2281 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2282 return -EINVAL;
a2fbb9ea
ET
2283}
2284
c18487ee 2285static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2286{
19680c48 2287 if (!BP_NOMCP(bp)) {
4a37fb66 2288 bnx2x_acquire_phy_lock(bp);
19680c48 2289 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2290 bnx2x_release_phy_lock(bp);
a2fbb9ea 2291
19680c48
EG
2292 bnx2x_calc_fc_adv(bp);
2293 } else
f5372251 2294 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2295}
a2fbb9ea 2296
c18487ee
YR
2297static void bnx2x__link_reset(struct bnx2x *bp)
2298{
19680c48 2299 if (!BP_NOMCP(bp)) {
4a37fb66 2300 bnx2x_acquire_phy_lock(bp);
589abe3a 2301 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2302 bnx2x_release_phy_lock(bp);
19680c48 2303 } else
f5372251 2304 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2305}
a2fbb9ea 2306
c18487ee
YR
2307static u8 bnx2x_link_test(struct bnx2x *bp)
2308{
2145a920 2309 u8 rc = 0;
a2fbb9ea 2310
2145a920
VZ
2311 if (!BP_NOMCP(bp)) {
2312 bnx2x_acquire_phy_lock(bp);
2313 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2314 bnx2x_release_phy_lock(bp);
2315 } else
2316 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 2317
c18487ee
YR
2318 return rc;
2319}
a2fbb9ea 2320
8a1c38d1 2321static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2322{
8a1c38d1
EG
2323 u32 r_param = bp->link_vars.line_speed / 8;
2324 u32 fair_periodic_timeout_usec;
2325 u32 t_fair;
34f80b04 2326
8a1c38d1
EG
2327 memset(&(bp->cmng.rs_vars), 0,
2328 sizeof(struct rate_shaping_vars_per_port));
2329 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2330
8a1c38d1
EG
2331 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2332 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2333
8a1c38d1
EG
2334 /* this is the threshold below which no timer arming will occur
 2335	   the 1.25 coefficient makes the threshold a little bigger
 2336	   than the real time, to compensate for timer inaccuracy */
2337 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2338 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2339
8a1c38d1
EG
2340 /* resolution of fairness timer */
2341 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2342 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2343 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2344
8a1c38d1
EG
2345 /* this is the threshold below which we won't arm the timer anymore */
2346 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2347
8a1c38d1
EG
2348 /* we multiply by 1e3/8 to get bytes/msec.
 2349	   We don't want the credits to exceed
 2350	   t_fair*FAIR_MEM (the algorithm resolution) */
2351 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2352 /* since each tick is 4 usec */
2353 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2354}
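
A worked example of the conversions above at 10G line speed may help; RS_PERIODIC_TIMEOUT_USEC and T_FAIR_COEF here are assumed values, chosen only to match the comments (a t_fair of 1000 usec at 10G):

#include <stdio.h>
#include <stdint.h>

/* Assumed coefficients, for illustration; the real ones live in
 * bnx2x.h. */
#define RS_PERIODIC_TIMEOUT_USEC	100
#define T_FAIR_COEF			10000000u

int main(void)
{
	uint32_t line_speed = 10000;		/* Mbps */
	uint32_t r_param = line_speed / 8;	/* roughly bytes per usec */

	/* 100 usec in 4-usec SDM ticks -> 25 */
	printf("rs_periodic_timeout = %u ticks\n",
	       RS_PERIODIC_TIMEOUT_USEC / 4);

	/* threshold = 1.25 * timeout * rate, written as *5/4 */
	printf("rs_threshold = %u bytes\n",
	       (unsigned)((RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4));

	/* fairness period: 1000 usec at 10G, 10000 usec at 1G */
	printf("t_fair = %u usec\n", (unsigned)(T_FAIR_COEF / line_speed));
	return 0;
}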
2355
2691d51d
EG
2356/* Calculates the sum of vn_min_rates.
2357 It's needed for further normalizing of the min_rates.
2358 Returns:
2359 sum of vn_min_rates.
2360 or
2361 0 - if all the min_rates are 0.
2362 In the later case fainess algorithm should be deactivated.
2363 If not all min_rates are zero then those that are zeroes will be set to 1.
2364 */
2365static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2366{
2367 int all_zero = 1;
2368 int port = BP_PORT(bp);
2369 int vn;
2370
2371 bp->vn_weight_sum = 0;
2372 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2373 int func = 2*vn + port;
2374 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2375 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2376 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2377
2378 /* Skip hidden vns */
2379 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2380 continue;
2381
2382 /* If min rate is zero - set it to 1 */
2383 if (!vn_min_rate)
2384 vn_min_rate = DEF_MIN_RATE;
2385 else
2386 all_zero = 0;
2387
2388 bp->vn_weight_sum += vn_min_rate;
2389 }
2390
 2391	/* disable fairness only if all the min rates are zero */
b015e3d1
EG
2392 if (all_zero) {
2393 bp->cmng.flags.cmng_enables &=
2394 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 2395		DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2396 " fairness will be disabled\n");
2397 } else
2398 bp->cmng.flags.cmng_enables |=
2399 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2400}
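
A standalone model of the weight-sum pass (the hidden-function skip is omitted), with an assumed DEF_MIN_RATE of 100:

#include <stdio.h>

/* Assumed default minimum, for illustration only. */
#define DEF_MIN_RATE	100

/* Model of the weight-sum pass: zero min rates are bumped to the
 * default so the VN still gets a share, and fairness stays enabled
 * unless every configured rate was zero. Hidden VNs are not modeled. */
static int vn_weight_sum(const int *min_rate, int n, int *fairness_on)
{
	int sum = 0, all_zero = 1, vn;

	for (vn = 0; vn < n; vn++) {
		if (min_rate[vn])
			all_zero = 0;
		sum += min_rate[vn] ? min_rate[vn] : DEF_MIN_RATE;
	}
	*fairness_on = !all_zero;
	return sum;
}

int main(void)
{
	int rates[4] = { 0, 2500, 0, 1000 };
	int fairness_on;
	int sum = vn_weight_sum(rates, 4, &fairness_on);

	/* sum = 100 + 2500 + 100 + 1000 = 3700, fairness = 1 */
	printf("sum = %d, fairness = %d\n", sum, fairness_on);
	return 0;
}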
2401
8a1c38d1 2402static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2403{
2404 struct rate_shaping_vars_per_vn m_rs_vn;
2405 struct fairness_vars_per_vn m_fair_vn;
2406 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2407 u16 vn_min_rate, vn_max_rate;
2408 int i;
2409
2410 /* If function is hidden - set min and max to zeroes */
2411 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2412 vn_min_rate = 0;
2413 vn_max_rate = 0;
2414
2415 } else {
2416 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2417 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2418 /* If min rate is zero - set it to 1 */
2419 if (!vn_min_rate)
34f80b04
EG
2420 vn_min_rate = DEF_MIN_RATE;
2421 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2422 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2423 }
8a1c38d1 2424 DP(NETIF_MSG_IFUP,
b015e3d1 2425 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2426 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2427
2428 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2429 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2430
2431 /* global vn counter - maximal Mbps for this vn */
2432 m_rs_vn.vn_counter.rate = vn_max_rate;
2433
2434 /* quota - number of bytes transmitted in this period */
2435 m_rs_vn.vn_counter.quota =
2436 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2437
8a1c38d1 2438 if (bp->vn_weight_sum) {
34f80b04
EG
2439 /* credit for each period of the fairness algorithm:
 2440	   number of bytes in T_FAIR (the VNs share the port rate).
8a1c38d1
EG
2441 vn_weight_sum should not be larger than 10000, thus
2442 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2443 than zero */
34f80b04 2444 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2445 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2446 (8 * bp->vn_weight_sum))),
2447 (bp->cmng.fair_vars.fair_threshold * 2));
2448 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2449 m_fair_vn.vn_credit_delta);
2450 }
2451
34f80b04
EG
2452 /* Store it to internal memory */
2453 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2454 REG_WR(bp, BAR_XSTRORM_INTMEM +
2455 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2456 ((u32 *)(&m_rs_vn))[i]);
2457
2458 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2459 REG_WR(bp, BAR_XSTRORM_INTMEM +
2460 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2461 ((u32 *)(&m_fair_vn))[i]);
2462}
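
A worked example of the per-VN shaping quota and fairness credit computed above; the coefficients reuse the assumptions from the port min-max sketch, and fair_threshold is an illustrative stand-in for the QM_ARB_BYTES-based value:

#include <stdio.h>
#include <stdint.h>

/* Same assumed coefficients as in the earlier min-max sketch. */
#define RS_PERIODIC_TIMEOUT_USEC	100
#define T_FAIR_COEF			10000000u

int main(void)
{
	uint32_t vn_min_rate = 2500, vn_max_rate = 10000;	/* Mbps */
	uint32_t vn_weight_sum = 3700;
	uint32_t fair_threshold = 8192;		/* illustrative */
	uint32_t by_rate, min_credit;

	/* bytes this VN may transmit per shaping period */
	printf("quota = %u\n",
	       (unsigned)((vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8));

	/* credit per fairness period, clamped from below */
	by_rate = vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum));
	min_credit = fair_threshold * 2;
	printf("credit_delta = %u\n",
	       (unsigned)(by_rate > min_credit ? by_rate : min_credit));
	return 0;
}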
2463
8a1c38d1 2464
c18487ee
YR
2465/* This function is called upon link interrupt */
2466static void bnx2x_link_attn(struct bnx2x *bp)
2467{
d9e8b185 2468 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2469 /* Make sure that we are synced with the current statistics */
2470 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2471
c18487ee 2472 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2473
bb2a0f7a
YG
2474 if (bp->link_vars.link_up) {
2475
1c06328c 2476 /* dropless flow control */
a18f5128 2477 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2478 int port = BP_PORT(bp);
2479 u32 pause_enabled = 0;
2480
2481 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2482 pause_enabled = 1;
2483
2484 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2485 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2486 pause_enabled);
2487 }
2488
bb2a0f7a
YG
2489 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2490 struct host_port_stats *pstats;
2491
2492 pstats = bnx2x_sp(bp, port_stats);
2493 /* reset old bmac stats */
2494 memset(&(pstats->mac_stx[0]), 0,
2495 sizeof(struct mac_stx));
2496 }
f34d28ea 2497 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2498 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2499 }
2500
d9e8b185
VZ
2501 /* indicate link status only if link status actually changed */
2502 if (prev_link_status != bp->link_vars.link_status)
2503 bnx2x_link_report(bp);
34f80b04
EG
2504
2505 if (IS_E1HMF(bp)) {
8a1c38d1 2506 int port = BP_PORT(bp);
34f80b04 2507 int func;
8a1c38d1 2508 int vn;
34f80b04 2509
ab6ad5a4 2510 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2511 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2512 if (vn == BP_E1HVN(bp))
2513 continue;
2514
8a1c38d1 2515 func = ((vn << 1) | port);
34f80b04
EG
2516 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2517 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2518 }
34f80b04 2519
8a1c38d1
EG
2520 if (bp->link_vars.link_up) {
2521 int i;
2522
2523 /* Init rate shaping and fairness contexts */
2524 bnx2x_init_port_minmax(bp);
34f80b04 2525
34f80b04 2526 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2527 bnx2x_init_vn_minmax(bp, 2*vn + port);
2528
2529 /* Store it to internal memory */
2530 for (i = 0;
2531 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2532 REG_WR(bp, BAR_XSTRORM_INTMEM +
2533 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2534 ((u32 *)(&bp->cmng))[i]);
2535 }
34f80b04 2536 }
c18487ee 2537}
a2fbb9ea 2538
c18487ee
YR
2539static void bnx2x__link_status_update(struct bnx2x *bp)
2540{
f34d28ea 2541 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2542 return;
a2fbb9ea 2543
c18487ee 2544 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2545
bb2a0f7a
YG
2546 if (bp->link_vars.link_up)
2547 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2548 else
2549 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2550
2691d51d
EG
2551 bnx2x_calc_vn_weight_sum(bp);
2552
c18487ee
YR
2553 /* indicate link status */
2554 bnx2x_link_report(bp);
a2fbb9ea 2555}
a2fbb9ea 2556
34f80b04
EG
2557static void bnx2x_pmf_update(struct bnx2x *bp)
2558{
2559 int port = BP_PORT(bp);
2560 u32 val;
2561
2562 bp->port.pmf = 1;
2563 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2564
2565 /* enable nig attention */
2566 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2567 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2568 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2569
2570 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2571}
2572
c18487ee 2573/* end of Link */
a2fbb9ea
ET
2574
2575/* slow path */
2576
2577/*
2578 * General service functions
2579 */
2580
2691d51d
EG
2581/* send the MCP a request, block until there is a reply */
2582u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2583{
2584 int func = BP_FUNC(bp);
2585 u32 seq = ++bp->fw_seq;
2586 u32 rc = 0;
2587 u32 cnt = 1;
2588 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2589
c4ff7cbf 2590 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2591 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2592 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2593
2594 do {
 2595		/* let the FW do its magic ... */
2596 msleep(delay);
2597
2598 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2599
c4ff7cbf
EG
 2600	/* Give the FW up to 5 seconds (500*10ms) */
2601 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2602
2603 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2604 cnt*delay, rc, seq);
2605
2606 /* is this a reply to our command? */
2607 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2608 rc &= FW_MSG_CODE_MASK;
2609 else {
2610 /* FW BUG! */
2611 BNX2X_ERR("FW failed to respond!\n");
2612 bnx2x_fw_dump(bp);
2613 rc = 0;
2614 }
c4ff7cbf 2615 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2616
2617 return rc;
2618}
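
The mailbox handshake above pairs requests with replies through the sequence number carried in the low bits of the header. A userspace model of that pairing; the mask values are assumptions, the real ones come from the firmware interface headers:

#include <stdio.h>
#include <stdint.h>

/* Assumed mask layout, for illustration only. */
#define FW_MSG_SEQ_NUMBER_MASK	0x0000ffffu
#define FW_MSG_CODE_MASK	0xffff0000u

/* Stand-in for the MCP: echo the sequence back under a reply code. */
static uint32_t fw_reply(uint32_t request)
{
	return 0x10230000u | (request & FW_MSG_SEQ_NUMBER_MASK);
}

int main(void)
{
	uint32_t seq = 42, command = 0x30000000u;
	uint32_t rc = fw_reply(command | seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		printf("reply code 0x%x\n", (unsigned)(rc & FW_MSG_CODE_MASK));
	else
		printf("FW failed to respond!\n");
	return 0;
}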
2619
e665bfda 2620static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2621static void bnx2x_set_rx_mode(struct net_device *dev);
2622
2623static void bnx2x_e1h_disable(struct bnx2x *bp)
2624{
2625 int port = BP_PORT(bp);
2691d51d
EG
2626
2627 netif_tx_disable(bp->dev);
2691d51d
EG
2628
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2630
2691d51d
EG
2631 netif_carrier_off(bp->dev);
2632}
2633
2634static void bnx2x_e1h_enable(struct bnx2x *bp)
2635{
2636 int port = BP_PORT(bp);
2637
2638 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2639
2691d51d
EG
 2640	/* Tx queues only need to be re-enabled */
2641 netif_tx_wake_all_queues(bp->dev);
2642
061bc702
EG
2643 /*
2644 * Should not call netif_carrier_on since it will be called if the link
2645 * is up when checking for link state
2646 */
2691d51d
EG
2647}
2648
2649static void bnx2x_update_min_max(struct bnx2x *bp)
2650{
2651 int port = BP_PORT(bp);
2652 int vn, i;
2653
2654 /* Init rate shaping and fairness contexts */
2655 bnx2x_init_port_minmax(bp);
2656
2657 bnx2x_calc_vn_weight_sum(bp);
2658
2659 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2660 bnx2x_init_vn_minmax(bp, 2*vn + port);
2661
2662 if (bp->port.pmf) {
2663 int func;
2664
2665 /* Set the attention towards other drivers on the same port */
2666 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2667 if (vn == BP_E1HVN(bp))
2668 continue;
2669
2670 func = ((vn << 1) | port);
2671 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2672 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2673 }
2674
2675 /* Store it to internal memory */
2676 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2677 REG_WR(bp, BAR_XSTRORM_INTMEM +
2678 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2679 ((u32 *)(&bp->cmng))[i]);
2680 }
2681}
2682
2683static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2684{
2691d51d 2685 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2686
2687 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2688
f34d28ea
EG
2689 /*
2690 * This is the only place besides the function initialization
 2691	 * where the bp->flags can change, so it is done without any
2692 * locks
2693 */
2691d51d
EG
2694 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2695 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2696 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2697
2698 bnx2x_e1h_disable(bp);
2699 } else {
2700 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2701 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2702
2703 bnx2x_e1h_enable(bp);
2704 }
2705 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2706 }
2707 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2708
2709 bnx2x_update_min_max(bp);
2710 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2711 }
2712
2713 /* Report results to MCP */
2714 if (dcc_event)
2715 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2716 else
2717 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2718}
2719
28912902
MC
2720/* must be called under the spq lock */
2721static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2722{
2723 struct eth_spe *next_spe = bp->spq_prod_bd;
2724
2725 if (bp->spq_prod_bd == bp->spq_last_bd) {
2726 bp->spq_prod_bd = bp->spq;
2727 bp->spq_prod_idx = 0;
2728 DP(NETIF_MSG_TIMER, "end of spq\n");
2729 } else {
2730 bp->spq_prod_bd++;
2731 bp->spq_prod_idx++;
2732 }
2733 return next_spe;
2734}
2735
2736/* must be called under the spq lock */
2737static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2738{
2739 int func = BP_FUNC(bp);
2740
2741 /* Make sure that BD data is updated before writing the producer */
2742 wmb();
2743
2744 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2745 bp->spq_prod_idx);
2746 mmiowb();
2747}
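
bnx2x_sp_get_next() hands out the current producer slot and then advances, wrapping to slot 0 after the last BD. A standalone model of that ring walk, with an illustrative ring size:

#include <stdio.h>

/* Illustrative ring size; the real SPQ size is fixed by the driver. */
#define SPQ_SIZE	8

/* Model of bnx2x_sp_get_next(): return the current producer slot,
 * then advance, wrapping past the last BD. */
static int sp_get_next(int *prod_idx)
{
	int cur = *prod_idx;

	*prod_idx = (*prod_idx + 1) % SPQ_SIZE;
	return cur;
}

int main(void)
{
	int prod_idx = 6, i;

	for (i = 0; i < 4; i++)
		printf("use entry %d\n", sp_get_next(&prod_idx)); /* 6 7 0 1 */
	return 0;
}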
2748
a2fbb9ea
ET
2749/* the slow path queue is odd since completions arrive on the fastpath ring */
2750static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2751 u32 data_hi, u32 data_lo, int common)
2752{
28912902 2753 struct eth_spe *spe;
a2fbb9ea 2754
a2fbb9ea
ET
2755#ifdef BNX2X_STOP_ON_ERROR
2756 if (unlikely(bp->panic))
2757 return -EIO;
2758#endif
2759
34f80b04 2760 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2761
2762 if (!bp->spq_left) {
2763 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2764 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2765 bnx2x_panic();
2766 return -EBUSY;
2767 }
f1410647 2768
28912902
MC
2769 spe = bnx2x_sp_get_next(bp);
2770
a2fbb9ea 2771	/* CID needs the port number to be encoded into it */
28912902 2772 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2773 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2774 HW_CID(bp, cid));
28912902 2775 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2776 if (common)
28912902 2777 spe->hdr.type |=
a2fbb9ea
ET
2778 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2779
28912902
MC
2780 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2781 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2782
2783 bp->spq_left--;
2784
cdaa7cb8
VZ
2785 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2786 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2787 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2788 (u32)(U64_LO(bp->spq_mapping) +
2789 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2790 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2791
28912902 2792 bnx2x_sp_prod_update(bp);
34f80b04 2793 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2794 return 0;
2795}
2796
2797/* acquire split MCP access lock register */
4a37fb66 2798static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2799{
72fd0718 2800 u32 j, val;
34f80b04 2801 int rc = 0;
a2fbb9ea
ET
2802
2803 might_sleep();
72fd0718 2804 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2805 val = (1UL << 31);
2806 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2807 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2808 if (val & (1L << 31))
2809 break;
2810
2811 msleep(5);
2812 }
a2fbb9ea 2813 if (!(val & (1L << 31))) {
19680c48 2814 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2815 rc = -EBUSY;
2816 }
2817
2818 return rc;
2819}
2820
4a37fb66
YG
2821/* release split MCP access lock register */
2822static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2823{
72fd0718 2824 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2825}
2826
2827static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2828{
2829 struct host_def_status_block *def_sb = bp->def_status_blk;
2830 u16 rc = 0;
2831
2832 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2833 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2834 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2835 rc |= 1;
2836 }
2837 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2838 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2839 rc |= 2;
2840 }
2841 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2842 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2843 rc |= 4;
2844 }
2845 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2846 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2847 rc |= 8;
2848 }
2849 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2850 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2851 rc |= 16;
2852 }
2853 return rc;
2854}
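
The helper above folds five status-block index comparisons into one bitmask so the caller can see which storm indices moved. The same pattern, as a generic standalone model:

#include <stdio.h>
#include <stdint.h>

/* Generic model of bnx2x_update_dsb_idx(): each cached index that
 * differs from the hardware's copy is refreshed and sets one bit in
 * the returned mask (attn, cstorm, ustorm, xstorm, tstorm in order). */
static uint16_t update_indices(uint16_t *cached, const uint16_t *hw, int n)
{
	uint16_t rc = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (cached[i] != hw[i]) {
			cached[i] = hw[i];
			rc |= 1 << i;
		}
	}
	return rc;
}

int main(void)
{
	uint16_t cached[5] = { 0, 0, 0, 0, 0 };
	uint16_t hw[5] = { 1, 0, 3, 0, 0 };

	printf("changed mask = 0x%x\n", update_indices(cached, hw, 5)); /* 0x5 */
	return 0;
}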
2855
2856/*
2857 * slow path service functions
2858 */
2859
2860static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2861{
34f80b04 2862 int port = BP_PORT(bp);
5c862848
EG
2863 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2864 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2865 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2866 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2867 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2868 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2869 u32 aeu_mask;
87942b46 2870 u32 nig_mask = 0;
a2fbb9ea 2871
a2fbb9ea
ET
2872 if (bp->attn_state & asserted)
2873 BNX2X_ERR("IGU ERROR\n");
2874
3fcaf2e5
EG
2875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2876 aeu_mask = REG_RD(bp, aeu_addr);
2877
a2fbb9ea 2878 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2879 aeu_mask, asserted);
72fd0718 2880 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2881 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2882
3fcaf2e5
EG
2883 REG_WR(bp, aeu_addr, aeu_mask);
2884 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2885
3fcaf2e5 2886 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2887 bp->attn_state |= asserted;
3fcaf2e5 2888 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2889
2890 if (asserted & ATTN_HARD_WIRED_MASK) {
2891 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2892
a5e9a7cf
EG
2893 bnx2x_acquire_phy_lock(bp);
2894
877e9aa4 2895 /* save nig interrupt mask */
87942b46 2896 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2897 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2898
c18487ee 2899 bnx2x_link_attn(bp);
a2fbb9ea
ET
2900
2901 /* handle unicore attn? */
2902 }
2903 if (asserted & ATTN_SW_TIMER_4_FUNC)
2904 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2905
2906 if (asserted & GPIO_2_FUNC)
2907 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2908
2909 if (asserted & GPIO_3_FUNC)
2910 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2911
2912 if (asserted & GPIO_4_FUNC)
2913 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2914
2915 if (port == 0) {
2916 if (asserted & ATTN_GENERAL_ATTN_1) {
2917 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2918 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2919 }
2920 if (asserted & ATTN_GENERAL_ATTN_2) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2923 }
2924 if (asserted & ATTN_GENERAL_ATTN_3) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2927 }
2928 } else {
2929 if (asserted & ATTN_GENERAL_ATTN_4) {
2930 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2931 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2932 }
2933 if (asserted & ATTN_GENERAL_ATTN_5) {
2934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2936 }
2937 if (asserted & ATTN_GENERAL_ATTN_6) {
2938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2940 }
2941 }
2942
2943 } /* if hardwired */
2944
5c862848
EG
2945 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2946 asserted, hc_addr);
2947 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2948
2949 /* now set back the mask */
a5e9a7cf 2950 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2951 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2952 bnx2x_release_phy_lock(bp);
2953 }
a2fbb9ea
ET
2954}
2955
fd4ef40d
EG
2956static inline void bnx2x_fan_failure(struct bnx2x *bp)
2957{
2958 int port = BP_PORT(bp);
2959
2960 /* mark the failure */
2961 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2962 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2963 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2964 bp->link_params.ext_phy_config);
2965
2966 /* log the failure */
cdaa7cb8
VZ
2967 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2968 " the driver to shutdown the card to prevent permanent"
2969 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2970}
ab6ad5a4 2971
877e9aa4 2972static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2973{
34f80b04 2974 int port = BP_PORT(bp);
877e9aa4 2975 int reg_offset;
4d295db0 2976 u32 val, swap_val, swap_override;
877e9aa4 2977
34f80b04
EG
2978 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2979 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2980
34f80b04 2981 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2982
2983 val = REG_RD(bp, reg_offset);
2984 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2985 REG_WR(bp, reg_offset, val);
2986
2987 BNX2X_ERR("SPIO5 hw attention\n");
2988
fd4ef40d 2989 /* Fan failure attention */
35b19ba5
EG
2990 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2991 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2992 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2993 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2994 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2995 /* The PHY reset is controlled by GPIO 1 */
2996 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2997 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2998 break;
2999
4d295db0
EG
3000 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3001 /* The PHY reset is controlled by GPIO 1 */
3002 /* fake the port number to cancel the swap done in
3003 set_gpio() */
3004 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3005 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3006 port = (swap_val && swap_override) ^ 1;
3007 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3008 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3009 break;
3010
877e9aa4
ET
3011 default:
3012 break;
3013 }
fd4ef40d 3014 bnx2x_fan_failure(bp);
877e9aa4 3015 }
34f80b04 3016
589abe3a
EG
3017 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3018 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3019 bnx2x_acquire_phy_lock(bp);
3020 bnx2x_handle_module_detect_int(&bp->link_params);
3021 bnx2x_release_phy_lock(bp);
3022 }
3023
34f80b04
EG
3024 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3025
3026 val = REG_RD(bp, reg_offset);
3027 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3028 REG_WR(bp, reg_offset, val);
3029
3030 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3031 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3032 bnx2x_panic();
3033 }
877e9aa4
ET
3034}
3035
3036static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3037{
3038 u32 val;
3039
0626b899 3040 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3041
3042 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3043 BNX2X_ERR("DB hw attention 0x%x\n", val);
3044 /* DORQ discard attention */
3045 if (val & 0x2)
3046 BNX2X_ERR("FATAL error from DORQ\n");
3047 }
34f80b04
EG
3048
3049 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3050
3051 int port = BP_PORT(bp);
3052 int reg_offset;
3053
3054 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3055 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3056
3057 val = REG_RD(bp, reg_offset);
3058 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3059 REG_WR(bp, reg_offset, val);
3060
3061 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3062 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3063 bnx2x_panic();
3064 }
877e9aa4
ET
3065}
3066
3067static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3068{
3069 u32 val;
3070
3071 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3072
3073 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3074 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3075 /* CFC error attention */
3076 if (val & 0x2)
3077 BNX2X_ERR("FATAL error from CFC\n");
3078 }
3079
3080 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3081
3082 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3083 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3084 /* RQ_USDMDP_FIFO_OVERFLOW */
3085 if (val & 0x18000)
3086 BNX2X_ERR("FATAL error from PXP\n");
3087 }
34f80b04
EG
3088
3089 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3090
3091 int port = BP_PORT(bp);
3092 int reg_offset;
3093
3094 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3095 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3096
3097 val = REG_RD(bp, reg_offset);
3098 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3099 REG_WR(bp, reg_offset, val);
3100
3101 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3102 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3103 bnx2x_panic();
3104 }
877e9aa4
ET
3105}
3106
3107static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3108{
34f80b04
EG
3109 u32 val;
3110
877e9aa4
ET
3111 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3112
34f80b04
EG
3113 if (attn & BNX2X_PMF_LINK_ASSERT) {
3114 int func = BP_FUNC(bp);
3115
3116 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3117 bp->mf_config = SHMEM_RD(bp,
3118 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3119 val = SHMEM_RD(bp, func_mb[func].drv_status);
3120 if (val & DRV_STATUS_DCC_EVENT_MASK)
3121 bnx2x_dcc_event(bp,
3122 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3123 bnx2x__link_status_update(bp);
2691d51d 3124 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3125 bnx2x_pmf_update(bp);
3126
3127 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3128
3129 BNX2X_ERR("MC assert!\n");
3130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3132 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3133 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3134 bnx2x_panic();
3135
3136 } else if (attn & BNX2X_MCP_ASSERT) {
3137
3138 BNX2X_ERR("MCP assert!\n");
3139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3140 bnx2x_fw_dump(bp);
877e9aa4
ET
3141
3142 } else
3143 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3144 }
3145
3146 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3147 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3148 if (attn & BNX2X_GRC_TIMEOUT) {
3149 val = CHIP_IS_E1H(bp) ?
3150 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3151 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3152 }
3153 if (attn & BNX2X_GRC_RSV) {
3154 val = CHIP_IS_E1H(bp) ?
3155 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3156 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3157 }
877e9aa4 3158 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3159 }
3160}
3161
72fd0718
VZ
3162static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3163static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3164
3165
3166#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3167#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3168#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3169#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3170#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3171#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3172/*
3173 * should be run under rtnl lock
3174 */
3175static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3176{
3177 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3178 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3179 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3180 barrier();
3181 mmiowb();
3182}
3183
3184/*
3185 * should be run under rtnl lock
3186 */
3187static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3188{
3189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 3190	val |= (1 << RESET_DONE_FLAG_SHIFT);
3191 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3192 barrier();
3193 mmiowb();
3194}
3195
3196/*
3197 * should be run under rtnl lock
3198 */
3199static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3200{
3201 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3202 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
 3203	return !(val & RESET_DONE_FLAG_MASK);
3204}
3205
3206/*
3207 * should be run under rtnl lock
3208 */
3209static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3210{
3211 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3212
3213 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3214
3215 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3216 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3217 barrier();
3218 mmiowb();
3219}
3220
3221/*
3222 * should be run under rtnl lock
3223 */
3224static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3225{
3226 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3227
3228 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3229
3230 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3231 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3232 barrier();
3233 mmiowb();
3234
3235 return val1;
3236}
3237
3238/*
3239 * should be run under rtnl lock
3240 */
3241static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3242{
3243 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3244}
3245
3246static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3247{
3248 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3249 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3250}
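
All the recovery helpers above manipulate a single generic register whose layout is a 16-bit load counter in the low bits plus a reset-in-progress flag at bit 16. A standalone model of that packing:

#include <stdio.h>
#include <stdint.h>

#define LOAD_COUNTER_BITS	16
#define LOAD_COUNTER_MASK	(((uint32_t)1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS

int main(void)
{
	uint32_t reg = 0, cnt;

	/* mark a reset in progress (flag set means "not done") */
	reg |= (uint32_t)1 << RESET_DONE_FLAG_SHIFT;

	/* increment the load counter without touching the flag bits */
	cnt = ((reg & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	reg = (reg & ~LOAD_COUNTER_MASK) | cnt;

	printf("load count = %u, reset done = %s\n",
	       (unsigned)(reg & LOAD_COUNTER_MASK),
	       (reg & ~LOAD_COUNTER_MASK) ? "no" : "yes");
	return 0;
}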
3251
3252static inline void _print_next_block(int idx, const char *blk)
3253{
3254 if (idx)
3255 pr_cont(", ");
3256 pr_cont("%s", blk);
3257}
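
_print_next_block() builds a comma-separated list by prefixing every name except the first. A standalone demonstration of the resulting output:

#include <stdio.h>

/* Standalone copy of the formatting helper: print ", " before every
 * block name except the first. */
static void print_next_block(int idx, const char *blk)
{
	if (idx)
		printf(", ");
	printf("%s", blk);
}

int main(void)
{
	const char *blks[] = { "BRB", "PARSER", "TSDM" };
	int par_num = 0, i;

	for (i = 0; i < 3; i++)
		print_next_block(par_num++, blks[i]);
	printf("\n");	/* prints: BRB, PARSER, TSDM */
	return 0;
}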
3258
3259static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3260{
3261 int i = 0;
3262 u32 cur_bit = 0;
3263 for (i = 0; sig; i++) {
3264 cur_bit = ((u32)0x1 << i);
3265 if (sig & cur_bit) {
3266 switch (cur_bit) {
3267 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3268 _print_next_block(par_num++, "BRB");
3269 break;
3270 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3271 _print_next_block(par_num++, "PARSER");
3272 break;
3273 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3274 _print_next_block(par_num++, "TSDM");
3275 break;
3276 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3277 _print_next_block(par_num++, "SEARCHER");
3278 break;
3279 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3280 _print_next_block(par_num++, "TSEMI");
3281 break;
3282 }
3283
3284 /* Clear the bit */
3285 sig &= ~cur_bit;
3286 }
3287 }
3288
3289 return par_num;
3290}
3291
3292static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3293{
3294 int i = 0;
3295 u32 cur_bit = 0;
3296 for (i = 0; sig; i++) {
3297 cur_bit = ((u32)0x1 << i);
3298 if (sig & cur_bit) {
3299 switch (cur_bit) {
3300 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3301 _print_next_block(par_num++, "PBCLIENT");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3304 _print_next_block(par_num++, "QM");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3307 _print_next_block(par_num++, "XSDM");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3310 _print_next_block(par_num++, "XSEMI");
3311 break;
3312 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3313 _print_next_block(par_num++, "DOORBELLQ");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3316 _print_next_block(par_num++, "VAUX PCI CORE");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3319 _print_next_block(par_num++, "DEBUG");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3322 _print_next_block(par_num++, "USDM");
3323 break;
3324 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3325 _print_next_block(par_num++, "USEMI");
3326 break;
3327 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3328 _print_next_block(par_num++, "UPB");
3329 break;
3330 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3331 _print_next_block(par_num++, "CSDM");
3332 break;
3333 }
3334
3335 /* Clear the bit */
3336 sig &= ~cur_bit;
3337 }
3338 }
3339
3340 return par_num;
3341}
3342
3343static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3344{
3345 int i = 0;
3346 u32 cur_bit = 0;
3347 for (i = 0; sig; i++) {
3348 cur_bit = ((u32)0x1 << i);
3349 if (sig & cur_bit) {
3350 switch (cur_bit) {
3351 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3352 _print_next_block(par_num++, "CSEMI");
3353 break;
3354 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3355 _print_next_block(par_num++, "PXP");
3356 break;
3357 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3358 _print_next_block(par_num++,
3359 "PXPPCICLOCKCLIENT");
3360 break;
3361 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3362 _print_next_block(par_num++, "CFC");
3363 break;
3364 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3365 _print_next_block(par_num++, "CDU");
3366 break;
3367 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3368 _print_next_block(par_num++, "IGU");
3369 break;
3370 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3371 _print_next_block(par_num++, "MISC");
3372 break;
3373 }
3374
3375 /* Clear the bit */
3376 sig &= ~cur_bit;
3377 }
3378 }
3379
3380 return par_num;
3381}
3382
3383static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3384{
3385 int i = 0;
3386 u32 cur_bit = 0;
3387 for (i = 0; sig; i++) {
3388 cur_bit = ((u32)0x1 << i);
3389 if (sig & cur_bit) {
3390 switch (cur_bit) {
3391 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3392 _print_next_block(par_num++, "MCP ROM");
3393 break;
3394 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3395 _print_next_block(par_num++, "MCP UMP RX");
3396 break;
3397 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3398 _print_next_block(par_num++, "MCP UMP TX");
3399 break;
3400 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3401 _print_next_block(par_num++, "MCP SCPAD");
3402 break;
3403 }
3404
3405 /* Clear the bit */
3406 sig &= ~cur_bit;
3407 }
3408 }
3409
3410 return par_num;
3411}
3412
3413static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3414 u32 sig2, u32 sig3)
3415{
3416 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3417 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3418 int par_num = 0;
 3419		DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3420 "[0]:0x%08x [1]:0x%08x "
3421 "[2]:0x%08x [3]:0x%08x\n",
3422 sig0 & HW_PRTY_ASSERT_SET_0,
3423 sig1 & HW_PRTY_ASSERT_SET_1,
3424 sig2 & HW_PRTY_ASSERT_SET_2,
3425 sig3 & HW_PRTY_ASSERT_SET_3);
 3426		printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3427 bp->dev->name);
3428 par_num = bnx2x_print_blocks_with_parity0(
3429 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3430 par_num = bnx2x_print_blocks_with_parity1(
3431 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3432 par_num = bnx2x_print_blocks_with_parity2(
3433 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3434 par_num = bnx2x_print_blocks_with_parity3(
3435 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
 3436		pr_cont("\n");
3437 return true;
3438 } else
3439 return false;
3440}
3441
3442static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3443{
a2fbb9ea 3444 struct attn_route attn;
72fd0718
VZ
3445 int port = BP_PORT(bp);
3446
3447 attn.sig[0] = REG_RD(bp,
3448 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3449 port*4);
3450 attn.sig[1] = REG_RD(bp,
3451 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3452 port*4);
3453 attn.sig[2] = REG_RD(bp,
3454 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3455 port*4);
3456 attn.sig[3] = REG_RD(bp,
3457 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3458 port*4);
3459
3460 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3461 attn.sig[3]);
3462}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

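/* Attention handling is edge-triggered against the cached attn_state:
 * a bit is newly asserted when the status block shows it set (attn_bits)
 * while it is neither acknowledged (attn_ack) nor already tracked
 * (attn_state), and deasserted when the block shows it clear while both
 * attn_ack and attn_state still have it set.  E.g. attn_bits=0x1,
 * attn_ack=0x0, attn_state=0x0 yields asserted=0x1, deasserted=0x0.
 */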
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
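/* The carry test exploits unsigned wraparound: after s_lo += a_lo the sum
 * can only be smaller than a_lo if the 32-bit addition overflowed, so
 * (s_lo < a_lo) is exactly the carry into the high word.  E.g. with
 * s_lo = 0xffffffff and a_lo = 1, s_lo wraps to 0 and s_hi gains 1.
 */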

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
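/* On a low-word underflow the result is m_lo + (UINT_MAX - s_lo) + 1,
 * i.e. m_lo - s_lo modulo 2^32, with one borrowed from the high word.
 * A negative overall difference cannot occur for monotonically growing
 * counters, so it is clamped to zero rather than allowed to wrap.
 */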

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)
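/* mac_stx[0] is the previous raw MAC snapshot and mac_stx[1] the running
 * total: each update computes the 64-bit delta between the fresh hardware
 * reading and the snapshot, saves the fresh reading as the new snapshot,
 * and folds the delta into the total, so accumulated values survive MAC
 * counter resets.
 */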

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

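/* Counters are kept as {hi, lo} pairs of u32 with the high word stored
 * first, so hiref points at the high half and hiref + 1 at the low half.
 * On 64-bit kernels the two halves are combined into one value (high word
 * in the upper 32 bits); on 32-bit kernels only the low word fits in a
 * long and the high word is dropped.
 */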
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
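/* Storm (firmware) statistics are requested via the STAT_QUERY ramrod
 * above, tagged with drv_counter so stale completions can be detected;
 * hardware MAC/NIG statistics are instead pulled over DMAE by the posting
 * helpers below, either through the loader chain or as a single command.
 */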

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
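/* Completion is signalled by the last DMAE command in the chain writing
 * DMAE_COMP_VAL into the stats_comp word; the poll above sleeps in 1 ms
 * steps and gives up after ten tries instead of blocking forever.
 */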

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

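/* Fold the accumulated 64-bit hi/lo counters into the generic
 * struct net_device_stats reported to the stack; on 32-bit kernels the
 * bnx2x_hilo() reads below truncate each counter to its low 32 bits.
 */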
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
					  " rx pkt(%lu) rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
					  " tx pkt(%lu) tx calls (%lu)"
					  " %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
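/* Two states (DISABLED, ENABLED) by four events (PMF, LINK_UP, UPDATE,
 * STOP): each table entry names the action to run and the next state.
 * E.g. a LINK_UP event while DISABLED runs bnx2x_stats_start() and moves
 * the machine to ENABLED; bnx2x_stats_handle() below performs the lookup.
 */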

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	if (unlikely(bp->panic))
		return;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

a2fbb9ea
ET
5037static void bnx2x_timer(unsigned long data)
5038{
5039 struct bnx2x *bp = (struct bnx2x *) data;
5040
5041 if (!netif_running(bp->dev))
5042 return;
5043
5044 if (atomic_read(&bp->intr_sem) != 0)
f1410647 5045 goto timer_restart;
a2fbb9ea
ET
5046
5047 if (poll) {
5048 struct bnx2x_fastpath *fp = &bp->fp[0];
5049 int rc;
5050
7961f791 5051 bnx2x_tx_int(fp);
a2fbb9ea
ET
5052 rc = bnx2x_rx_int(fp, 1000);
5053 }
5054
34f80b04
EG
5055 if (!BP_NOMCP(bp)) {
5056 int func = BP_FUNC(bp);
a2fbb9ea
ET
5057 u32 drv_pulse;
5058 u32 mcp_pulse;
5059
5060 ++bp->fw_drv_pulse_wr_seq;
5061 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5062 /* TBD - add SYSTEM_TIME */
5063 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 5064 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 5065
34f80b04 5066 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
5067 MCP_PULSE_SEQ_MASK);
5068 /* The delta between driver pulse and mcp response
5069 * should be 1 (before mcp response) or 0 (after mcp response)
5070 */
5071 if ((drv_pulse != mcp_pulse) &&
5072 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5073 /* someone lost a heartbeat... */
5074 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5075 drv_pulse, mcp_pulse);
5076 }
5077 }
5078
f34d28ea 5079 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5080 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5081
f1410647 5082timer_restart:
a2fbb9ea
ET
5083 mod_timer(&bp->timer, jiffies + bp->current_interval);
5084}
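
/*
 * Reader's note (illustrative, not part of the original driver): the
 * driver/MCP heartbeat above is a pair of sequence counters in shared
 * memory. Assuming the usual 15-bit DRV_PULSE_SEQ_MASK of this HSI, a
 * healthy exchange looks like:
 *
 *	drv_pulse  mcp_pulse  delta  state
 *	   0x10       0x0f      1    driver wrote, MCP not yet echoed
 *	   0x10       0x10      0    MCP caught up
 *	   0x12       0x0f      3    missed beats -> BNX2X_ERR() above
 *
 * i.e. any delta other than 0 or 1 (modulo the mask) means one side
 * stopped responding.
 */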

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
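
/*
 * Reader's note (illustrative arithmetic, assumptions flagged): the HC
 * timeout fields above are programmed in units of 4 * BNX2X_BTR ticks.
 * Assuming BNX2X_BTR == 4 (its usual definition in this driver), a
 * bp->rx_ticks of 50 us yields 50/16 = 3 in the timeout register, while
 * anything below 16 us truncates to 0, in which case the paired
 * CSTORM_SB_HC_DISABLE write turns interrupt coalescing off for that
 * status-block index entirely.
 */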

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
					 cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
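
/*
 * Reader's note (illustrative, not part of the original driver): each rx
 * ring above is a chain of BCM_PAGE_SIZE pages whose trailing
 * descriptor(s) hold the DMA address of the next page. That is why the
 * "next page" loops write element RX_DESC_CNT*i - 2 (an eth_rx_bd is too
 * small for a 64-bit pointer, so the last two BDs of each page are
 * reserved) and RCQ_DESC_CNT*i - 1 (one full CQE is reserved), and why
 * the NEXT_RX_IDX()/NEXT_RCQ_IDX()/NEXT_SGE_IDX() producer macros skip
 * those reserved slots when wrapping across a page boundary.
 */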

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
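
/*
 * Reader's note (illustrative, not part of the original driver): the loop
 * above stripes client ids round-robin through the RSS indirection table
 * (TSTORM_INDIRECTION_TABLE_SIZE entries, 128 on this firmware). For
 * example, with 4 queues and a leading cl_id of 0:
 *
 *	entry:  0 1 2 3 4 5 6 7 ...
 *	cl_id:  0 1 2 3 0 1 2 3 ...
 *
 * so a packet whose RSS hash selects table entry h is steered to queue
 * h % num_queues.
 */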

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
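
/*
 * Reader's note (illustrative summary, not part of the original driver):
 * the switch above reduces to this accept/drop matrix per traffic class,
 * with "mask" selecting which client(s) the filter applies to:
 *
 *	mode      ucast        mcast        bcast
 *	NONE      drop_all     drop_all     drop_all
 *	NORMAL    (MAC match)  (MC list)    accept_all
 *	ALLMULTI  (MAC match)  accept_all   accept_all
 *	PROMISC   accept_all   accept_all   accept_all
 *
 * Exact-match unicast MACs and the multicast list are programmed
 * elsewhere (CAM configuration), which is why NORMAL sets no accept_all
 * bits for them here.
 */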

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link.
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
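
/*
 * Reader's note (illustrative, with assumptions flagged): in E1H
 * multi-function mode the min/max ("fairness" and "rate shaping") setup
 * above has to budget link bandwidth among the virtual engines before any
 * real link exists, so it assumes a 10G line rate. The cmng structure
 * written to XSTORM internal memory is, as far as this file's link
 * attention handling goes, recalculated once an actual line_speed is
 * reported.
 */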

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
			   " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
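
/*
 * Reader's note (illustrative, not part of the original driver):
 * bnx2x_gunzip() parses just enough of the RFC 1952 gzip wrapper by
 * hand - magic bytes 0x1f 0x8b, compression method 8 (deflate), then an
 * optional NUL-terminated original-file-name if FLG bit 3 (FNAME, 0x8)
 * is set - and hands the raw deflate stream that follows to
 * zlib_inflateInit2() with negative windowBits (-MAX_WBITS), which tells
 * zlib to skip its own header and trailer processing. The fixed
 * "n = 10" is the size of the mandatory gzip header.
 */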

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable
 * from the driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do I reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
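
/*
 * Reader's note (illustrative, not part of the original driver): the
 * self-test above exercises internal memories indirectly. Each
 * bnx2x_lb_pckt() pushes one fixed 0x10-byte frame into the NIG debug
 * loopback, so the NIG byte counter should read 0x10 after one frame and
 * 11 * 0x10 = 0xb0 after the second stage. With the parser's CFC-search
 * credits held at zero the frames back up in the BRB; granting a single
 * credit releases exactly one more frame through the parser, which is
 * what the "2 packets" then "3 packets" PRS_REG_NUM_OF_PACKETS checks
 * observe before everything is reset and re-enabled.
 */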

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}
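
/*
 * Reader's note (illustrative, not part of the original driver): these
 * are mask registers, so a 0x0 entry in bnx2x_parity_mask[] unmasks
 * every parity attention source in that block, while set bits stay
 * masked - 0xffffffff disables parity reporting for a block entirely,
 * and the partial values (0x4, 0x18, 0x38, 1<<6, ...) keep only the
 * bits noted in the trailing comments masked.
 */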

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
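
/*
 * Reader's note (illustrative arithmetic, not part of the original
 * driver): the shifts above extract the PCIe Device Control field
 * encodings, where payload and read-request sizes are stored as
 * log2(size) - 7. For example, devctl = 0x2830 gives
 * w_order = (0x2830 & PCI_EXP_DEVCTL_PAYLOAD) >> 5 = 1 (256-byte max
 * payload) and r_order = (0x2830 & PCI_EXP_DEVCTL_READRQ) >> 12 = 2
 * (512-byte max read request); bnx2x_init_pxp_arb() then sizes the PXP
 * arbiter queues from those two orders.
 */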
fd4ef40d
EG
6424
6425static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6426{
2145a920 6427 int is_required;
fd4ef40d 6428 u32 val;
2145a920 6429 int port;
fd4ef40d 6430
2145a920
VZ
6431 if (BP_NOMCP(bp))
6432 return;
6433
6434 is_required = 0;
fd4ef40d
EG
6435 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6436 SHARED_HW_CFG_FAN_FAILURE_MASK;
6437
6438 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6439 is_required = 1;
6440
6441 /*
6442 * The fan failure mechanism is usually related to the PHY type since
6443 * the power consumption of the board is affected by the PHY. Currently,
6444 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6445 */
6446 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6447 for (port = PORT_0; port < PORT_MAX; port++) {
6448 u32 phy_type =
6449 SHMEM_RD(bp, dev_info.port_hw_config[port].
6450 external_phy_config) &
6451 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6452 is_required |=
6453 ((phy_type ==
6454 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
6455 (phy_type ==
6456 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
6457 (phy_type ==
6458 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6459 }
6460
6461 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6462
6463 if (is_required == 0)
6464 return;
6465
6466 /* Fan failure is indicated by SPIO 5 */
6467 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6468 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6469
6470 /* set to active low mode */
6471 val = REG_RD(bp, MISC_REG_SPIO_INT);
6472 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 6473 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
6474 REG_WR(bp, MISC_REG_SPIO_INT, val);
6475
6476 /* enable interrupt to signal the IGU */
6477 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6478 val |= (1 << MISC_REGISTERS_SPIO_5);
6479 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6480}
6481
34f80b04 6482static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6483{
a2fbb9ea 6484 u32 val, i;
37b091ba
MC
6485#ifdef BCM_CNIC
6486 u32 wb_write[2];
6487#endif
a2fbb9ea 6488
34f80b04 6489 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6490
81f75bbf 6491 bnx2x_reset_common(bp);
34f80b04
EG
6492 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6493 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6494
94a78b79 6495 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6496 if (CHIP_IS_E1H(bp))
6497 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6498
34f80b04
EG
6499 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6500 msleep(30);
6501 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6502
94a78b79 6503 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6504 if (CHIP_IS_E1(bp)) {
6505 /* enable HW interrupt from PXP on USDM overflow
6506 bit 16 on INT_MASK_0 */
6507 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6508 }
a2fbb9ea 6509
94a78b79 6510 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6511 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6512
6513#ifdef __BIG_ENDIAN
34f80b04
EG
6514 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6515 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6516 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6517 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6518 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6519 /* make sure this value is 0 */
6520 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6521
6522/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6523 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6524 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6525 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6526 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6527#endif
6528
34f80b04 6529 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6530#ifdef BCM_CNIC
34f80b04
EG
6531 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6532 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6533 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6534#endif
6535
34f80b04
EG
6536 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6537 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6538
34f80b04
EG
6539 /* let the HW do it's magic ... */
6540 msleep(100);
6541 /* finish PXP init */
6542 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6543 if (val != 1) {
6544 BNX2X_ERR("PXP2 CFG failed\n");
6545 return -EBUSY;
6546 }
6547 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6548 if (val != 1) {
6549 BNX2X_ERR("PXP2 RD_INIT failed\n");
6550 return -EBUSY;
6551 }
a2fbb9ea 6552
34f80b04
EG
6553 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6554 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6555
94a78b79 6556 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6557
34f80b04
EG
6558 /* clean the DMAE memory */
6559 bp->dmae_ready = 1;
6560 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6561
94a78b79
VZ
6562 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6563 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6564 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6565 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6566
34f80b04
EG
6567 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6568 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6569 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6570 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6571
94a78b79 6572 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6573
6574#ifdef BCM_CNIC
6575 wb_write[0] = 0;
6576 wb_write[1] = 0;
6577 for (i = 0; i < 64; i++) {
6578 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6579 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6580
6581 if (CHIP_IS_E1H(bp)) {
6582 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6583 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6584 wb_write, 2);
6585 }
6586 }
6587#endif
34f80b04
EG
6588 /* soft reset pulse */
6589 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6590 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6591
37b091ba 6592#ifdef BCM_CNIC
94a78b79 6593 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6594#endif
a2fbb9ea 6595
94a78b79 6596 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6597 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6598 if (!CHIP_REV_IS_SLOW(bp)) {
6599 /* enable hw interrupt from doorbell Q */
6600 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6601 }
a2fbb9ea 6602
94a78b79
VZ
6603 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6604 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6605 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6606#ifndef BCM_CNIC
3196a88a
EG
6607 /* set NIC mode */
6608 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6609#endif
34f80b04
EG
6610 if (CHIP_IS_E1H(bp))
6611 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6612
94a78b79
VZ
6613 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6614 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6615 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6617
ca00392c
EG
6618 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6619 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6620 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6621 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6622
94a78b79
VZ
6623 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6624 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6625 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6626 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6627
34f80b04
EG
6628 /* sync semi rtc */
6629 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6630 0x80000000);
6631 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6632 0x80000000);
a2fbb9ea 6633
94a78b79
VZ
6634 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6635 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6636 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6637
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

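	/*
	 * The BRB octet counter is read below because it survives driver
	 * reloads: a zero value is taken to mean this is the first load
	 * since power-up, and only then is the internal memory self test
	 * worth running (and only on E1).
	 */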
	/* read the NIG statistic to see if this is
	   our first load since power-up */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
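	/*
	 * The BRB pause thresholds below are written in 256-byte blocks
	 * (hence the /256 arithmetic in the in-line comments): e.g. for a
	 * single-function two-port device with a jumbo MTU, the low
	 * watermark reserves 24KB plus four MTU-sized frames, and outside
	 * the emulation/FPGA case the high watermark always sits 14KB
	 * (56 blocks) above the low one.
	 */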
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE (MTU 9000) */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked, only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked, bits 0-2 are in use as in SF and
	 *    bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The physical address is shifted right by 12 bits and a valid bit (1)
 * is added at bit 52 (the 53rd bit) of the shifted value; since the
 * target is a wide 64-bit register, the result is split into two
 * 32-bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
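/*
 * Worked example (hypothetical address): for x = 0x0012345678900000,
 * ONCHIP_ADDR1(x) = 0x45678900 (address bits 12..43) and
 * ONCHIP_ADDR2(x) = 0x00100123 (valid bit | address bits 44..63).
 */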

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

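	/*
	 * Each function owns ILT_PER_FUNC consecutive ILT lines, starting
	 * at FUNC_ILT_BASE(func).  The first 1 + CNIC_ILT_LINES lines are
	 * reserved for CDU context memory; with CNIC built in, the three
	 * lines that follow map the timers, QM and searcher T1 tables,
	 * while the searcher's T2 table is handed to the HW by bus address
	 * and stays off-ILT.
	 */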
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

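	/*
	 * The switch below is a deliberate fall-through ladder: a COMMON
	 * load also runs the PORT and FUNCTION init stages, and a PORT
	 * load also runs the FUNCTION stage.
	 */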
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
			       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

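/*
 * bnx2x_alloc_mem() mirrors bnx2x_free_mem() above: DMA-coherent blocks
 * come from dma_alloc_coherent() and host-only rings from vmalloc(), and
 * any allocation failure unwinds through a single goto target that simply
 * calls bnx2x_free_mem() (safe because every pointer is NULL-checked
 * there).
 */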
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate the searcher T2 table: a quarter of the T1 allocation,
	   and not entered into the ILT */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

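	/*
	 * The loop above chains the 64-byte T2 entries into a free list:
	 * the last 8 bytes of each entry hold the bus address of the
	 * following entry, matching the FIRSTFREE/LASTFREE programming
	 * done in bnx2x_init_func().
	 */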
	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}

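/*
 * MSI-X vector layout: entry 0 is the slowpath (default status block)
 * vector, entry 1 is taken by CNIC when it is built in, and one vector
 * follows per fastpath queue.  If the PCI layer offers fewer vectors
 * than requested, the queue count is shrunk to fit and the request is
 * retried.
 */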
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* vectors available for FP */
		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

		DP(NETIF_MSG_IFUP,
		   "Trying to use fewer MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}

		bp->num_queues = min(bp->num_queues, fp_vec);

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

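/*
 * bp->intr_sem counts outstanding "interrupts disabled" requests;
 * atomic_dec_and_test() returns true only for the caller that drops it
 * to zero, so NAPI and HW interrupts are re-armed exactly once.
 */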
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

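	/*
	 * Each 16-bit CAM field holds two MAC bytes: on a little-endian
	 * host, mac[] = 00:11:22:33:44:55 loads as 0x1100 and swab16()
	 * yields 0x0011 for the msb field, 0x2233 for the middle and
	 * 0x4455 for the lsb, which appears to be the order the CAM
	 * expects (and is what the DP below prints).
	 */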
	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
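
/*
 * Ramrod completions normally arrive via interrupt and flip *state_p in
 * bnx2x_sp_event(); with poll set, the caller drives the RX completion
 * ring itself, e.g. while tearing down with interrupts already off.
 */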
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Sets the iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall back to INTx if MSI-X could not be enabled for lack
		   of memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP;
	   its reply tells us which kind of LOAD to run:
	   if this is the first function to be initialized,
	   the common blocks should be initialized as well, otherwise not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

7975
f5372251 7976 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7977 load_count[0], load_count[1], load_count[2]);
7978 load_count[0]++;
7979 load_count[1 + port]++;
f5372251 7980 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7981 load_count[0], load_count[1], load_count[2]);
7982 if (load_count[0] == 1)
7983 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7984 else if (load_count[1 + port] == 1)
7985 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7986 else
7987 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7988 }
7989
7990 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7991 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7992 bp->port.pmf = 1;
7993 else
7994 bp->port.pmf = 0;
7995 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7996
a2fbb9ea 7997 /* Initialize HW */
34f80b04
EG
7998 rc = bnx2x_init_hw(bp, load_code);
7999 if (rc) {
a2fbb9ea 8000 BNX2X_ERR("HW init failed, aborting\n");
f1e1a199
VZ
8001 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8002 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8003 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
2dfe0e1f 8004 goto load_error2;
a2fbb9ea
ET
8005 }
8006
a2fbb9ea 8007 /* Setup NIC internals and enable interrupts */
471de716 8008 bnx2x_nic_init(bp, load_code);
a2fbb9ea 8009
2691d51d
EG
8010 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8011 (bp->common.shmem2_base))
8012 SHMEM2_WR(bp, dcc_support,
8013 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8014 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8015
a2fbb9ea 8016 /* Send LOAD_DONE command to MCP */
34f80b04 8017 if (!BP_NOMCP(bp)) {
228241eb
ET
8018 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8019 if (!load_code) {
da5a662a 8020 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 8021 rc = -EBUSY;
2dfe0e1f 8022 goto load_error3;
a2fbb9ea
ET
8023 }
8024 }
8025
8026 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8027
34f80b04
EG
8028 rc = bnx2x_setup_leading(bp);
8029 if (rc) {
da5a662a 8030 BNX2X_ERR("Setup leading failed!\n");
e3553b29 8031#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 8032 goto load_error3;
e3553b29
EG
8033#else
8034 bp->panic = 1;
8035 return -EBUSY;
8036#endif
34f80b04 8037 }
a2fbb9ea 8038
34f80b04
EG
8039 if (CHIP_IS_E1H(bp))
8040 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 8041 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 8042 bp->flags |= MF_FUNC_DIS;
34f80b04 8043 }
a2fbb9ea 8044
ca00392c 8045 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
8046#ifdef BCM_CNIC
8047 /* Enable Timer scan */
8048 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8049#endif
34f80b04
EG
8050 for_each_nondefault_queue(bp, i) {
8051 rc = bnx2x_setup_multi(bp, i);
8052 if (rc)
37b091ba
MC
8053#ifdef BCM_CNIC
8054 goto load_error4;
8055#else
2dfe0e1f 8056 goto load_error3;
37b091ba 8057#endif
34f80b04 8058 }
a2fbb9ea 8059
ca00392c 8060 if (CHIP_IS_E1(bp))
e665bfda 8061 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 8062 else
e665bfda 8063 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
8064#ifdef BCM_CNIC
8065 /* Set iSCSI L2 MAC */
8066 mutex_lock(&bp->cnic_mutex);
8067 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8068 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8069 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
4a6e47a4
MC
8070 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8071 CNIC_SB_ID(bp));
993ac7b5
MC
8072 }
8073 mutex_unlock(&bp->cnic_mutex);
8074#endif
ca00392c 8075 }
34f80b04
EG
8076
8077 if (bp->port.pmf)
b5bf9068 8078 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
8079
8080 /* Start fast path */
34f80b04
EG
8081 switch (load_mode) {
8082 case LOAD_NORMAL:
ca00392c
EG
8083 if (bp->state == BNX2X_STATE_OPEN) {
8084 /* Tx queue should be only reenabled */
8085 netif_tx_wake_all_queues(bp->dev);
8086 }
2dfe0e1f 8087 /* Initialize the receive filter. */
34f80b04
EG
8088 bnx2x_set_rx_mode(bp->dev);
8089 break;
8090
8091 case LOAD_OPEN:
555f6c78 8092 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
8093 if (bp->state != BNX2X_STATE_OPEN)
8094 netif_tx_disable(bp->dev);
2dfe0e1f 8095 /* Initialize the receive filter. */
34f80b04 8096 bnx2x_set_rx_mode(bp->dev);
34f80b04 8097 break;
a2fbb9ea 8098
34f80b04 8099 case LOAD_DIAG:
2dfe0e1f 8100 /* Initialize the receive filter. */
a2fbb9ea 8101 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
8102 bp->state = BNX2X_STATE_DIAG;
8103 break;
8104
8105 default:
8106 break;
a2fbb9ea
ET
8107 }
8108
34f80b04
EG
8109 if (!bp->port.pmf)
8110 bnx2x__link_status_update(bp);
8111
a2fbb9ea
ET
8112 /* start the timer */
8113 mod_timer(&bp->timer, jiffies + bp->current_interval);
8114
993ac7b5
MC
8115#ifdef BCM_CNIC
8116 bnx2x_setup_cnic_irq_info(bp);
8117 if (bp->state == BNX2X_STATE_OPEN)
8118 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8119#endif
72fd0718 8120 bnx2x_inc_load_cnt(bp);
34f80b04 8121
a2fbb9ea
ET
8122 return 0;
8123
37b091ba
MC
8124#ifdef BCM_CNIC
8125load_error4:
8126 /* Disable Timer scan */
8127 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8128#endif
2dfe0e1f
EG
8129load_error3:
8130 bnx2x_int_disable_sync(bp, 1);
8131 if (!BP_NOMCP(bp)) {
8132 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8133 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8134 }
8135 bp->port.pmf = 0;
7a9b2557
VZ
8136 /* Free SKBs, SGEs, TPA pool and driver internals */
8137 bnx2x_free_skbs(bp);
54b9ddaa 8138 for_each_queue(bp, i)
3196a88a 8139 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 8140load_error2:
d1014634 8141 /* Release IRQs */
6cbe5065 8142 bnx2x_free_irq(bp, false);
2dfe0e1f
EG
8143load_error1:
8144 bnx2x_napi_disable(bp);
54b9ddaa 8145 for_each_queue(bp, i)
7cde1c8b 8146 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8147 bnx2x_free_mem(bp);
8148
34f80b04 8149 return rc;
a2fbb9ea
ET
8150}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block;
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
8307
72fd0718 8308static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 8309{
da5a662a 8310 int port = BP_PORT(bp);
a2fbb9ea 8311 u32 reset_code = 0;
da5a662a 8312 int i, cnt, rc;
a2fbb9ea 8313
555f6c78 8314 /* Wait until tx fastpath tasks complete */
54b9ddaa 8315 for_each_queue(bp, i) {
228241eb
ET
8316 struct bnx2x_fastpath *fp = &bp->fp[i];
8317
34f80b04 8318 cnt = 1000;
e8b5fc51 8319 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 8320
7961f791 8321 bnx2x_tx_int(fp);
34f80b04
EG
8322 if (!cnt) {
8323 BNX2X_ERR("timeout waiting for queue[%d]\n",
8324 i);
8325#ifdef BNX2X_STOP_ON_ERROR
8326 bnx2x_panic();
8327 return -EBUSY;
8328#else
8329 break;
8330#endif
8331 }
8332 cnt--;
da5a662a 8333 msleep(1);
34f80b04 8334 }
228241eb 8335 }
da5a662a
VZ
8336 /* Give HW time to discard old tx messages */
8337 msleep(1);
a2fbb9ea 8338
3101c2bc
YG
8339 if (CHIP_IS_E1(bp)) {
8340 struct mac_configuration_cmd *config =
8341 bnx2x_sp(bp, mcast_config);
8342
e665bfda 8343 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 8344
8d9c5f34 8345 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
8346 CAM_INVALIDATE(config->config_table[i]);
8347
8d9c5f34 8348 config->hdr.length = i;
3101c2bc
YG
8349 if (CHIP_REV_IS_SLOW(bp))
8350 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8351 else
8352 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 8353 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
8354 config->hdr.reserved1 = 0;
8355
e665bfda
MC
8356 bp->set_mac_pending++;
8357 smp_wmb();
8358
3101c2bc
YG
8359 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8360 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8361 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8362
8363 } else { /* E1H */
65abd74d
YG
8364 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8365
e665bfda 8366 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
8367
8368 for (i = 0; i < MC_HASH_SIZE; i++)
8369 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
8370
8371 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 8372 }
993ac7b5
MC
8373#ifdef BCM_CNIC
8374 /* Clear iSCSI L2 MAC */
8375 mutex_lock(&bp->cnic_mutex);
8376 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8377 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8378 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8379 }
8380 mutex_unlock(&bp->cnic_mutex);
8381#endif
3101c2bc 8382
65abd74d
YG
8383 if (unload_mode == UNLOAD_NORMAL)
8384 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8385
7d0446c2 8386 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8387 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8388
7d0446c2 8389 else if (bp->wol) {
65abd74d
YG
8390 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8391 u8 *mac_addr = bp->dev->dev_addr;
8392 u32 val;
8393 /* The mac address is written to entries 1-4 to
8394 preserve entry 0 which is used by the PMF */
8395 u8 entry = (BP_E1HVN(bp) + 1)*8;
8396
8397 val = (mac_addr[0] << 8) | mac_addr[1];
8398 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8399
8400 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8401 (mac_addr[4] << 8) | mac_addr[5];
8402 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8403
8404 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8405
8406 } else
8407 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8408
34f80b04
EG
8409 /* Close multi and leading connections.
8410 Completions for ramrods are collected synchronously */
a2fbb9ea
ET
8411 for_each_nondefault_queue(bp, i)
8412 if (bnx2x_stop_multi(bp, i))
228241eb 8413 goto unload_error;
a2fbb9ea 8414
da5a662a
VZ
8415 rc = bnx2x_stop_leading(bp);
8416 if (rc) {
34f80b04 8417 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8418#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8419 return -EBUSY;
da5a662a
VZ
8420#else
8421 goto unload_error;
34f80b04 8422#endif
228241eb
ET
8423 }
8424
8425unload_error:
34f80b04 8426 if (!BP_NOMCP(bp))
228241eb 8427 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8428 else {
f5372251 8429 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8430 load_count[0], load_count[1], load_count[2]);
8431 load_count[0]--;
da5a662a 8432 load_count[1 + port]--;
f5372251 8433 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8434 load_count[0], load_count[1], load_count[2]);
8435 if (load_count[0] == 0)
8436 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8437 else if (load_count[1 + port] == 0)
34f80b04
EG
8438 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8439 else
8440 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8441 }
a2fbb9ea 8442
34f80b04
EG
8443 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8444 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8445 bnx2x__link_reset(bp);
a2fbb9ea
ET
8446
8447 /* Reset the chip */
228241eb 8448 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8449
8450 /* Report UNLOAD_DONE to MCP */
34f80b04 8451 if (!BP_NOMCP(bp))
a2fbb9ea 8452 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8453
72fd0718
VZ
8454}
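/*
 * A sketch of the EMAC MAC-match packing used for WoL in
 * bnx2x_chip_cleanup() above: the 6-byte MAC address is split across two
 * 32-bit register words, most significant byte first.  The helper names
 * below are hypothetical, for illustration only:
 */
static inline u32 bnx2x_wol_mac_match_hi(const u8 *mac)
{
	/* bytes 0-1 land in the low 16 bits of the first word */
	return (mac[0] << 8) | mac[1];
}

static inline u32 bnx2x_wol_mac_match_lo(const u8 *mac)
{
	/* bytes 2-5 fill the second word */
	return (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}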
8455
8456static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8457{
8458 u32 val;
8459
8460 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8461
8462 if (CHIP_IS_E1(bp)) {
8463 int port = BP_PORT(bp);
8464 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8465 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8466
8467 val = REG_RD(bp, addr);
8468 val &= ~(0x300);
8469 REG_WR(bp, addr, val);
8470 } else if (CHIP_IS_E1H(bp)) {
8471 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8472 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8473 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8474 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8475 }
8476}
8477
8478/* must be called with rtnl_lock */
8479static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8480{
8481 int i;
8482
8483 if (bp->state == BNX2X_STATE_CLOSED) {
8484 /* Interface has been removed - nothing to recover */
8485 bp->recovery_state = BNX2X_RECOVERY_DONE;
8486 bp->is_leader = 0;
8487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8488 smp_wmb();
8489
8490 return -EINVAL;
8491 }
8492
8493#ifdef BCM_CNIC
8494 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8495#endif
8496 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8497
8498 /* Set "drop all" */
8499 bp->rx_mode = BNX2X_RX_MODE_NONE;
8500 bnx2x_set_storm_rx_mode(bp);
8501
8502 /* Disable HW interrupts, NAPI and Tx */
8503 bnx2x_netif_stop(bp, 1);
8504
8505 del_timer_sync(&bp->timer);
8506 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8507 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8508 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8509
8510 /* Release IRQs */
8511 bnx2x_free_irq(bp, false);
8512
8513 /* Cleanup the chip if needed */
8514 if (unload_mode != UNLOAD_RECOVERY)
8515 bnx2x_chip_cleanup(bp, unload_mode);
8516
9a035440 8517 bp->port.pmf = 0;
a2fbb9ea 8518
7a9b2557 8519 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8520 bnx2x_free_skbs(bp);
54b9ddaa 8521 for_each_queue(bp, i)
3196a88a 8522 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8523 for_each_queue(bp, i)
7cde1c8b 8524 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8525 bnx2x_free_mem(bp);
8526
8527 bp->state = BNX2X_STATE_CLOSED;
228241eb 8528
a2fbb9ea
ET
8529 netif_carrier_off(bp->dev);
8530
72fd0718
VZ
8531 /* The last driver must disable the "close the gate" functionality if
8532 * there is no parity attention or "process kill" pending.
8533 */
8534 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8535 bnx2x_reset_is_done(bp))
8536 bnx2x_disable_close_the_gate(bp);
8537
8538 /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8539 if (unload_mode == UNLOAD_RECOVERY)
8540 bp->fw_seq = 0;
8541
8542 return 0;
8543}
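/*
 * bnx2x_nic_unload() must be called under rtnl_lock (see the comment at
 * its definition).  A minimal sketch of a well-formed call site, using
 * only symbols that appear in this file:
 *
 *	rtnl_lock();
 *	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
 *	bnx2x_nic_load(bp, LOAD_NORMAL);
 *	rtnl_unlock();
 */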
8544
8545/* Close gates #2, #3 and #4: */
8546static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8547{
8548 u32 val, addr;
8549
8550 /* Gates #2 and #4a are closed/opened for "not E1" only */
8551 if (!CHIP_IS_E1(bp)) {
8552 /* #4 */
8553 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8554 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8555 close ? (val | 0x1) : (val & (~(u32)1)));
8556 /* #2 */
8557 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8558 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8559 close ? (val | 0x1) : (val & (~(u32)1)));
8560 }
8561
8562 /* #3 */
8563 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8564 val = REG_RD(bp, addr);
8565 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8566
8567 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8568 close ? "closing" : "opening");
8569 mmiowb();
8570}
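/*
 * Note the inverted sense of gate #3 above: the HC_REG_CONFIG_x bit is an
 * enable bit, so "closing" that gate means clearing it, while the two PXP
 * discard bits are set to close.  All three use the same read-modify-write
 * idiom (sketch):
 *
 *	val = REG_RD(bp, addr);
 *	REG_WR(bp, addr, set_bit ? (val | 0x1) : (val & ~(u32)1));
 */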
8571
8572#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8573
8574static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8575{
8576 /* Do some magic... */
8577 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8578 *magic_val = val & SHARED_MF_CLP_MAGIC;
8579 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8580}
8581
8582/* Restore the value of the `magic' bit.
8583 *
8584 * @param bp Driver handle.
8585 * @param magic_val Old value of the `magic' bit.
8586 */
8587static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8588{
8589 /* Restore the `magic' bit value... */
8590 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8591 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8592 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8593 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8594 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8595 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8596}
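/*
 * bnx2x_clp_reset_prep() and bnx2x_clp_reset_done() form a save/restore
 * pair around an MCP reset: prep latches the old state of the CLP `magic'
 * bit and forces it on, done writes the saved state back.  Intended
 * pairing (illustration):
 *
 *	u32 magic;
 *
 *	bnx2x_clp_reset_prep(bp, &magic);
 *	...reset the MCP...
 *	bnx2x_clp_reset_done(bp, magic);
 */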
8597
8598/* Prepares for MCP reset: takes care of CLP configurations.
8599 *
8600 * @param bp Driver handle.
8601 * @param magic_val Old value of 'magic' bit.
8602 */
8603static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8604{
8605 u32 shmem;
8606 u32 validity_offset;
8607
8608 DP(NETIF_MSG_HW, "Starting\n");
8609
8610 /* Set `magic' bit in order to save MF config */
8611 if (!CHIP_IS_E1(bp))
8612 bnx2x_clp_reset_prep(bp, magic_val);
8613
8614 /* Get shmem offset */
8615 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8616 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8617
8618 /* Clear validity map flags */
8619 if (shmem > 0)
8620 REG_WR(bp, shmem + validity_offset, 0);
8621}
8622
8623#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8624#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8625
8626/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8627 * depending on the HW type.
8628 *
8629 * @param bp Driver handle.
8630 */
8631static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8632{
8633 /* special handling for emulation and FPGA,
8634 wait 10 times longer */
8635 if (CHIP_REV_IS_SLOW(bp))
8636 msleep(MCP_ONE_TIMEOUT*10);
8637 else
8638 msleep(MCP_ONE_TIMEOUT);
8639}
8640
8641static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8642{
8643 u32 shmem, cnt, validity_offset, val;
8644 int rc = 0;
8645
8646 msleep(100);
8647
8648 /* Get shmem offset */
8649 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8650 if (shmem == 0) {
8651 BNX2X_ERR("Shmem 0 return failure\n");
8652 rc = -ENOTTY;
8653 goto exit_lbl;
8654 }
8655
8656 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8657
8658 /* Wait for MCP to come up */
8659 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8660 /* TBD: it's best to check the validity map of the last port;
8661 * currently checks port 0.
8662 */
8663 val = REG_RD(bp, shmem + validity_offset);
8664 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8665 shmem + validity_offset, val);
8666
8667 /* check that shared memory is valid. */
8668 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8669 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8670 break;
8671
8672 bnx2x_mcp_wait_one(bp);
8673 }
8674
8675 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8676
8677 /* Check that shared memory is valid. This indicates that MCP is up. */
8678 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8679 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8680 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8681 rc = -ENOTTY;
8682 goto exit_lbl;
8683 }
8684
8685exit_lbl:
8686 /* Restore the `magic' bit value */
8687 if (!CHIP_IS_E1(bp))
8688 bnx2x_clp_reset_done(bp, magic_val);
8689
8690 return rc;
8691}
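/*
 * The polling budget above works out to MCP_TIMEOUT / MCP_ONE_TIMEOUT =
 * 5000 / 100 = 50 iterations; with bnx2x_mcp_wait_one() sleeping 100 ms
 * per iteration (1 s on emulation/FPGA) the MCP gets roughly 5 s (50 s)
 * to raise both validity flags before it is declared dead.
 */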
8692
8693static void bnx2x_pxp_prep(struct bnx2x *bp)
8694{
8695 if (!CHIP_IS_E1(bp)) {
8696 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8697 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8698 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8699 mmiowb();
8700 }
8701}
8702
8703/*
8704 * Reset the whole chip except for:
8705 * - PCIE core
8706 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8707 * one reset bit)
8708 * - IGU
8709 * - MISC (including AEU)
8710 * - GRC
8711 * - RBCN, RBCP
8712 */
8713static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8714{
8715 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8716
8717 not_reset_mask1 =
8718 MISC_REGISTERS_RESET_REG_1_RST_HC |
8719 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8720 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8721
8722 not_reset_mask2 =
8723 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8724 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8725 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8726 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8727 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8728 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8729 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8730 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8731
8732 reset_mask1 = 0xffffffff;
8733
8734 if (CHIP_IS_E1(bp))
8735 reset_mask2 = 0xffff;
8736 else
8737 reset_mask2 = 0x1ffff;
8738
8739 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8740 reset_mask1 & (~not_reset_mask1));
8741 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8742 reset_mask2 & (~not_reset_mask2));
8743
8744 barrier();
8745 mmiowb();
8746
8747 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8748 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8749 mmiowb();
8750}
8751
8752static int bnx2x_process_kill(struct bnx2x *bp)
8753{
8754 int cnt = 1000;
8755 u32 val = 0;
8756 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8757
8758
8759 /* Empty the Tetris buffer, wait for 1s */
8760 do {
8761 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8762 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8763 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8764 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8765 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8766 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8767 ((port_is_idle_0 & 0x1) == 0x1) &&
8768 ((port_is_idle_1 & 0x1) == 0x1) &&
8769 (pgl_exp_rom2 == 0xffffffff))
8770 break;
8771 msleep(1);
8772 } while (cnt-- > 0);
8773
8774 if (cnt <= 0) {
8775 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8776 " are still"
8777 " outstanding read requests after 1s!\n");
8778 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8779 " port_is_idle_0=0x%08x,"
8780 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8781 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8782 pgl_exp_rom2);
8783 return -EAGAIN;
8784 }
8785
8786 barrier();
8787
8788 /* Close gates #2, #3 and #4 */
8789 bnx2x_set_234_gates(bp, true);
8790
8791 /* TBD: Indicate that "process kill" is in progress to MCP */
8792
8793 /* Clear "unprepared" bit */
8794 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8795 barrier();
8796
8797 /* Make sure all is written to the chip before the reset */
8798 mmiowb();
8799
8800 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8801 * PSWHST, GRC and PSWRD Tetris buffer.
8802 */
8803 msleep(1);
8804
8805 /* Prepare for chip reset: */
8806 /* MCP */
8807 bnx2x_reset_mcp_prep(bp, &val);
8808
8809 /* PXP */
8810 bnx2x_pxp_prep(bp);
8811 barrier();
8812
8813 /* reset the chip */
8814 bnx2x_process_kill_chip_reset(bp);
8815 barrier();
8816
8817 /* Recover after reset: */
8818 /* MCP */
8819 if (bnx2x_reset_mcp_comp(bp, val))
8820 return -EAGAIN;
8821
8822 /* PXP */
8823 bnx2x_pxp_prep(bp);
8824
8825 /* Open the gates #2, #3 and #4 */
8826 bnx2x_set_234_gates(bp, false);
8827
8828 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8829 * reset state, re-enable attentions. */
8830
a2fbb9ea
ET
8831 return 0;
8832}
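/*
 * Summary of the "process kill" sequence implemented above:
 *
 *	1. drain the PXP "Tetris" buffer (poll for up to ~1 s)
 *	2. close gates #2, #3 and #4
 *	3. clear MISC_REG_UNPREPARED
 *	4. prepare the MCP (save the CLP magic bit, clear the shmem
 *	   validity map) and the PXP
 *	5. pulse the two MISC reset registers
 *	6. wait for the MCP to come back and restore the CLP magic bit
 *	7. re-open the gates
 */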
8833
72fd0718
VZ
8834static int bnx2x_leader_reset(struct bnx2x *bp)
8835{
8836 int rc = 0;
8837 /* Try to recover after the failure */
8838 if (bnx2x_process_kill(bp)) {
8839 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8840 bp->dev->name);
8841 rc = -EAGAIN;
8842 goto exit_leader_reset;
8843 }
8844
8845 /* Clear "reset is in progress" bit and update the driver state */
8846 bnx2x_set_reset_done(bp);
8847 bp->recovery_state = BNX2X_RECOVERY_DONE;
8848
8849exit_leader_reset:
8850 bp->is_leader = 0;
8851 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8852 smp_wmb();
8853 return rc;
8854}
8855
8856static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8857
8858/* Assumption: runs under rtnl lock. This together with the fact
8859 * that it's called only from bnx2x_reset_task() ensures that it
8860 * will never be called when netif_running(bp->dev) is false.
8861 */
8862static void bnx2x_parity_recover(struct bnx2x *bp)
8863{
8864 DP(NETIF_MSG_HW, "Handling parity\n");
8865 while (1) {
8866 switch (bp->recovery_state) {
8867 case BNX2X_RECOVERY_INIT:
8868 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8869 /* Try to get a LEADER_LOCK HW lock */
8870 if (bnx2x_trylock_hw_lock(bp,
8871 HW_LOCK_RESOURCE_RESERVED_08))
8872 bp->is_leader = 1;
8873
8874 /* Stop the driver */
8875 /* If interface has been removed - break */
8876 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8877 return;
8878
8879 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8880 /* Ensure "is_leader" and "recovery_state"
8881 * update values are seen on other CPUs
8882 */
8883 smp_wmb();
8884 break;
8885
8886 case BNX2X_RECOVERY_WAIT:
8887 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8888 if (bp->is_leader) {
8889 u32 load_counter = bnx2x_get_load_cnt(bp);
8890 if (load_counter) {
8891 /* Wait until all other functions get
8892 * down.
8893 */
8894 schedule_delayed_work(&bp->reset_task,
8895 HZ/10);
8896 return;
8897 } else {
8898 /* If all other functions got down -
8899 * try to bring the chip back to
8900 * normal. In any case it's an exit
8901 * point for a leader.
8902 */
8903 if (bnx2x_leader_reset(bp) ||
8904 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8905 printk(KERN_ERR"%s: Recovery "
8906 "has failed. Power cycle is "
8907 "needed.\n", bp->dev->name);
8908 /* Disconnect this device */
8909 netif_device_detach(bp->dev);
8910 /* Block ifup for all function
8911 * of this ASIC until
8912 * "process kill" or power
8913 * cycle.
8914 */
8915 bnx2x_set_reset_in_progress(bp);
8916 /* Shut down the power */
8917 bnx2x_set_power_state(bp,
8918 PCI_D3hot);
8919 return;
8920 }
8921
8922 return;
8923 }
8924 } else { /* non-leader */
8925 if (!bnx2x_reset_is_done(bp)) {
8926 /* Try to get a LEADER_LOCK HW lock,
8927 * since a former leader may have
8928 * been unloaded by the user or
8929 * released leadership for another
8930 * reason.
8931 */
8932 if (bnx2x_trylock_hw_lock(bp,
8933 HW_LOCK_RESOURCE_RESERVED_08)) {
8934 /* I'm a leader now! Restart a
8935 * switch case.
8936 */
8937 bp->is_leader = 1;
8938 break;
8939 }
8940
8941 schedule_delayed_work(&bp->reset_task,
8942 HZ/10);
8943 return;
8944
8945 } else { /* A leader has completed
8946 * the "process kill". It's an exit
8947 * point for a non-leader.
8948 */
8949 bnx2x_nic_load(bp, LOAD_NORMAL);
8950 bp->recovery_state =
8951 BNX2X_RECOVERY_DONE;
8952 smp_wmb();
8953 return;
8954 }
8955 }
8956 default:
8957 return;
8958 }
8959 }
8960}
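/*
 * The recovery state machine above, in brief:
 *
 *	INIT: try to take the leader lock, unload the NIC, go to WAIT.
 *	WAIT (leader): once the chip-wide load counter drops to zero,
 *	    run bnx2x_leader_reset() and reload; on failure, detach the
 *	    device and power it down.
 *	WAIT (non-leader): wait for the leader to finish the process
 *	    kill (possibly inheriting leadership if the lock frees up),
 *	    then reload and go to DONE.
 */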
8961
8962/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8963 * scheduled on a general queue in order to prevent a deadlock.
8964 */
34f80b04
EG
8965static void bnx2x_reset_task(struct work_struct *work)
8966{
72fd0718 8967 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
8968
8969#ifdef BNX2X_STOP_ON_ERROR
8970 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8971 " so reset not done to allow debug dump,\n"
72fd0718 8972 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
8973 return;
8974#endif
8975
8976 rtnl_lock();
8977
8978 if (!netif_running(bp->dev))
8979 goto reset_task_exit;
8980
72fd0718
VZ
8981 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8982 bnx2x_parity_recover(bp);
8983 else {
8984 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8985 bnx2x_nic_load(bp, LOAD_NORMAL);
8986 }
34f80b04
EG
8987
8988reset_task_exit:
8989 rtnl_unlock();
8990}
8991
a2fbb9ea
ET
8992/* end of nic load/unload */
8993
8994/* ethtool_ops */
8995
8996/*
8997 * Init service functions
8998 */
8999
f1ef27ef
EG
9000static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9001{
9002 switch (func) {
9003 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9004 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9005 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9006 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9007 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9008 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9009 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9010 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9011 default:
9012 BNX2X_ERR("Unsupported function index: %d\n", func);
9013 return (u32)(-1);
9014 }
9015}
9016
9017static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9018{
9019 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9020
9021 /* Flush all outstanding writes */
9022 mmiowb();
9023
9024 /* Pretend to be function 0 */
9025 REG_WR(bp, reg, 0);
9026 /* Flush the GRC transaction (in the chip) */
9027 new_val = REG_RD(bp, reg);
9028 if (new_val != 0) {
9029 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9030 new_val);
9031 BUG();
9032 }
9033
9034 /* From now on we are in the "like-E1" mode */
9035 bnx2x_int_disable(bp);
9036
9037 /* Flush all outstanding writes */
9038 mmiowb();
9039
9040 /* Restore the original function settings */
9041 REG_WR(bp, reg, orig_func);
9042 new_val = REG_RD(bp, reg);
9043 if (new_val != orig_func) {
9044 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9045 orig_func, new_val);
9046 BUG();
9047 }
9048}
9049
9050static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9051{
9052 if (CHIP_IS_E1H(bp))
9053 bnx2x_undi_int_disable_e1h(bp, func);
9054 else
9055 bnx2x_int_disable(bp);
9056}
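/*
 * The "pretend" register used above makes subsequent GRC accesses from
 * this PCI function appear to come from another function; pretending to
 * be function 0 lets an E1H function perform the E1-style interrupt
 * disable on the right block.  The read-back after each write both
 * flushes the GRC transaction and sanity-checks that the pretend value
 * actually stuck.
 */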
9057
34f80b04
EG
9058static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9059{
9060 u32 val;
9061
9062 /* Check if there is any driver already loaded */
9063 val = REG_RD(bp, MISC_REG_UNPREPARED);
9064 if (val == 0x1) {
9065 /* Check if it is the UNDI driver:
9066 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
9067 */
4a37fb66 9068 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9069 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9070 if (val == 0x7) {
9071 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 9072 /* save our func */
34f80b04 9073 int func = BP_FUNC(bp);
da5a662a
VZ
9074 u32 swap_en;
9075 u32 swap_val;
34f80b04 9076
b4661739
EG
9077 /* clear the UNDI indication */
9078 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9079
34f80b04
EG
9080 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9081
9082 /* try unload UNDI on port 0 */
9083 bp->func = 0;
da5a662a
VZ
9084 bp->fw_seq =
9085 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9086 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 9087 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9088
9089 /* if UNDI is loaded on the other port */
9090 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9091
da5a662a
VZ
9092 /* send "DONE" for previous unload */
9093 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9094
9095 /* unload UNDI on port 1 */
34f80b04 9096 bp->func = 1;
da5a662a
VZ
9097 bp->fw_seq =
9098 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9099 DRV_MSG_SEQ_NUMBER_MASK);
9100 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9101
9102 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
9103 }
9104
b4661739
EG
9105 /* now it's safe to release the lock */
9106 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9107
f1ef27ef 9108 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
9109
9110 /* close input traffic and wait for it */
9111 /* Do not rcv packets to BRB */
9112 REG_WR(bp,
9113 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9114 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9115 /* Do not direct rcv packets that are not for MCP to
9116 * the BRB */
9117 REG_WR(bp,
9118 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9119 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9120 /* clear AEU */
9121 REG_WR(bp,
9122 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9123 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9124 msleep(10);
9125
9126 /* save NIG port swap info */
9127 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9128 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
9129 /* reset device */
9130 REG_WR(bp,
9131 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 9132 0xd3ffffff);
34f80b04
EG
9133 REG_WR(bp,
9134 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9135 0x1403);
da5a662a
VZ
9136 /* take the NIG out of reset and restore swap values */
9137 REG_WR(bp,
9138 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9139 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9140 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9141 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9142
9143 /* send unload done to the MCP */
9144 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9145
9146 /* restore our func and fw_seq */
9147 bp->func = func;
9148 bp->fw_seq =
9149 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9150 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
9151
9152 } else
9153 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
9154 }
9155}
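/*
 * UNDI takeover above, in short: detect the UNDI signature (doorbell CID
 * offset == 0x7), issue an unload request for each port on which UNDI is
 * loaded, stop RX traffic into the BRB, save the NIG port-swap straps,
 * pulse the chip through the two MISC reset registers, then restore the
 * straps and report UNLOAD_DONE to the MCP.
 */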
9156
9157static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9158{
9159 u32 val, val2, val3, val4, id;
72ce58c3 9160 u16 pmc;
34f80b04
EG
9161
9162 /* Get the chip revision id and number. */
9163 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9164 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9165 id = ((val & 0xffff) << 16);
9166 val = REG_RD(bp, MISC_REG_CHIP_REV);
9167 id |= ((val & 0xf) << 12);
9168 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9169 id |= ((val & 0xff) << 4);
5a40e08e 9170 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
9171 id |= (val & 0xf);
9172 bp->common.chip_id = id;
9173 bp->link_params.chip_id = bp->common.chip_id;
9174 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9175
1c06328c
EG
9176 val = (REG_RD(bp, 0x2874) & 0x55);
9177 if ((bp->common.chip_id & 0x1) ||
9178 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9179 bp->flags |= ONE_PORT_FLAG;
9180 BNX2X_DEV_INFO("single port device\n");
9181 }
9182
34f80b04
EG
9183 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9184 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9185 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9186 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9187 bp->common.flash_size, bp->common.flash_size);
9188
9189 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 9190 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 9191 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
9192 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9193 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
9194
9195 if (!bp->common.shmem_base ||
9196 (bp->common.shmem_base < 0xA0000) ||
9197 (bp->common.shmem_base >= 0xC0000)) {
9198 BNX2X_DEV_INFO("MCP not active\n");
9199 bp->flags |= NO_MCP_FLAG;
9200 return;
9201 }
9202
9203 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9204 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9205 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 9206 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
9207
9208 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 9209 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
9210
9211 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9212 SHARED_HW_CFG_LED_MODE_MASK) >>
9213 SHARED_HW_CFG_LED_MODE_SHIFT);
9214
c2c8b03e
EG
9215 bp->link_params.feature_config_flags = 0;
9216 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9217 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9218 bp->link_params.feature_config_flags |=
9219 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9220 else
9221 bp->link_params.feature_config_flags &=
9222 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9223
34f80b04
EG
9224 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9225 bp->common.bc_ver = val;
9226 BNX2X_DEV_INFO("bc_ver %X\n", val);
9227 if (val < BNX2X_BC_VER) {
9228 /* for now only warn;
9229 * later we might need to enforce this */
cdaa7cb8
VZ
9230 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9231 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 9232 }
4d295db0
EG
9233 bp->link_params.feature_config_flags |=
9234 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9235 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
9236
9237 if (BP_E1HVN(bp) == 0) {
9238 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9239 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9240 } else {
9241 /* no WOL capability for E1HVN != 0 */
9242 bp->flags |= NO_WOL_FLAG;
9243 }
9244 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 9245 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
9246
9247 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9248 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9249 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9250 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9251
cdaa7cb8
VZ
9252 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9253 val, val2, val3, val4);
34f80b04
EG
9254}
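/*
 * Layout of bp->common.chip_id as assembled above:
 *
 *	bits 16-31  chip number  (MISC_REG_CHIP_NUM)
 *	bits 12-15  revision     (MISC_REG_CHIP_REV)
 *	bits  4-11  metal        (MISC_REG_CHIP_METAL)
 *	bits  0-3   bond id      (MISC_REG_BOND_ID)
 */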
9255
9256static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9257 u32 switch_cfg)
a2fbb9ea 9258{
34f80b04 9259 int port = BP_PORT(bp);
a2fbb9ea
ET
9260 u32 ext_phy_type;
9261
a2fbb9ea
ET
9262 switch (switch_cfg) {
9263 case SWITCH_CFG_1G:
9264 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9265
c18487ee
YR
9266 ext_phy_type =
9267 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9268 switch (ext_phy_type) {
9269 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9270 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9271 ext_phy_type);
9272
34f80b04
EG
9273 bp->port.supported |= (SUPPORTED_10baseT_Half |
9274 SUPPORTED_10baseT_Full |
9275 SUPPORTED_100baseT_Half |
9276 SUPPORTED_100baseT_Full |
9277 SUPPORTED_1000baseT_Full |
9278 SUPPORTED_2500baseX_Full |
9279 SUPPORTED_TP |
9280 SUPPORTED_FIBRE |
9281 SUPPORTED_Autoneg |
9282 SUPPORTED_Pause |
9283 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9284 break;
9285
9286 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9287 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9288 ext_phy_type);
9289
34f80b04
EG
9290 bp->port.supported |= (SUPPORTED_10baseT_Half |
9291 SUPPORTED_10baseT_Full |
9292 SUPPORTED_100baseT_Half |
9293 SUPPORTED_100baseT_Full |
9294 SUPPORTED_1000baseT_Full |
9295 SUPPORTED_TP |
9296 SUPPORTED_FIBRE |
9297 SUPPORTED_Autoneg |
9298 SUPPORTED_Pause |
9299 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9300 break;
9301
9302 default:
9303 BNX2X_ERR("NVRAM config error. "
9304 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 9305 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9306 return;
9307 }
9308
34f80b04
EG
9309 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9310 port*0x10);
9311 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
9312 break;
9313
9314 case SWITCH_CFG_10G:
9315 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9316
c18487ee
YR
9317 ext_phy_type =
9318 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
9319 switch (ext_phy_type) {
9320 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9321 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9322 ext_phy_type);
9323
34f80b04
EG
9324 bp->port.supported |= (SUPPORTED_10baseT_Half |
9325 SUPPORTED_10baseT_Full |
9326 SUPPORTED_100baseT_Half |
9327 SUPPORTED_100baseT_Full |
9328 SUPPORTED_1000baseT_Full |
9329 SUPPORTED_2500baseX_Full |
9330 SUPPORTED_10000baseT_Full |
9331 SUPPORTED_TP |
9332 SUPPORTED_FIBRE |
9333 SUPPORTED_Autoneg |
9334 SUPPORTED_Pause |
9335 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9336 break;
9337
589abe3a
EG
9338 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9339 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 9340 ext_phy_type);
f1410647 9341
34f80b04 9342 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9343 SUPPORTED_1000baseT_Full |
34f80b04 9344 SUPPORTED_FIBRE |
589abe3a 9345 SUPPORTED_Autoneg |
34f80b04
EG
9346 SUPPORTED_Pause |
9347 SUPPORTED_Asym_Pause);
f1410647
ET
9348 break;
9349
589abe3a
EG
9350 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9351 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
9352 ext_phy_type);
9353
34f80b04 9354 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 9355 SUPPORTED_2500baseX_Full |
34f80b04 9356 SUPPORTED_1000baseT_Full |
589abe3a
EG
9357 SUPPORTED_FIBRE |
9358 SUPPORTED_Autoneg |
9359 SUPPORTED_Pause |
9360 SUPPORTED_Asym_Pause);
9361 break;
9362
9363 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9364 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9365 ext_phy_type);
9366
9367 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
9368 SUPPORTED_FIBRE |
9369 SUPPORTED_Pause |
9370 SUPPORTED_Asym_Pause);
f1410647
ET
9371 break;
9372
589abe3a
EG
9373 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9374 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
9375 ext_phy_type);
9376
34f80b04
EG
9377 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9378 SUPPORTED_1000baseT_Full |
9379 SUPPORTED_FIBRE |
34f80b04
EG
9380 SUPPORTED_Pause |
9381 SUPPORTED_Asym_Pause);
f1410647
ET
9382 break;
9383
589abe3a
EG
9384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9385 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
9386 ext_phy_type);
9387
34f80b04 9388 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 9389 SUPPORTED_1000baseT_Full |
34f80b04 9390 SUPPORTED_Autoneg |
589abe3a 9391 SUPPORTED_FIBRE |
34f80b04
EG
9392 SUPPORTED_Pause |
9393 SUPPORTED_Asym_Pause);
c18487ee
YR
9394 break;
9395
4d295db0
EG
9396 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9397 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9398 ext_phy_type);
9399
9400 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9401 SUPPORTED_1000baseT_Full |
9402 SUPPORTED_Autoneg |
9403 SUPPORTED_FIBRE |
9404 SUPPORTED_Pause |
9405 SUPPORTED_Asym_Pause);
9406 break;
9407
f1410647
ET
9408 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9409 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9410 ext_phy_type);
9411
34f80b04
EG
9412 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9413 SUPPORTED_TP |
9414 SUPPORTED_Autoneg |
9415 SUPPORTED_Pause |
9416 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
9417 break;
9418
28577185
EG
9419 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9420 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9421 ext_phy_type);
9422
9423 bp->port.supported |= (SUPPORTED_10baseT_Half |
9424 SUPPORTED_10baseT_Full |
9425 SUPPORTED_100baseT_Half |
9426 SUPPORTED_100baseT_Full |
9427 SUPPORTED_1000baseT_Full |
9428 SUPPORTED_10000baseT_Full |
9429 SUPPORTED_TP |
9430 SUPPORTED_Autoneg |
9431 SUPPORTED_Pause |
9432 SUPPORTED_Asym_Pause);
9433 break;
9434
c18487ee
YR
9435 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9436 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9437 bp->link_params.ext_phy_config);
9438 break;
9439
a2fbb9ea
ET
9440 default:
9441 BNX2X_ERR("NVRAM config error. "
9442 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 9443 bp->link_params.ext_phy_config);
a2fbb9ea
ET
9444 return;
9445 }
9446
34f80b04
EG
9447 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9448 port*0x18);
9449 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 9450
a2fbb9ea
ET
9451 break;
9452
9453 default:
9454 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 9455 bp->port.link_config);
a2fbb9ea
ET
9456 return;
9457 }
34f80b04 9458 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
9459
9460 /* mask what we support according to speed_cap_mask */
c18487ee
YR
9461 if (!(bp->link_params.speed_cap_mask &
9462 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 9463 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 9464
c18487ee
YR
9465 if (!(bp->link_params.speed_cap_mask &
9466 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 9467 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 9468
c18487ee
YR
9469 if (!(bp->link_params.speed_cap_mask &
9470 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 9471 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 9472
c18487ee
YR
9473 if (!(bp->link_params.speed_cap_mask &
9474 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 9475 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 9476
c18487ee
YR
9477 if (!(bp->link_params.speed_cap_mask &
9478 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
9479 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9480 SUPPORTED_1000baseT_Full);
a2fbb9ea 9481
c18487ee
YR
9482 if (!(bp->link_params.speed_cap_mask &
9483 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 9484 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 9485
c18487ee
YR
9486 if (!(bp->link_params.speed_cap_mask &
9487 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 9488 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 9489
34f80b04 9490 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
9491}
9492
34f80b04 9493static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 9494{
c18487ee 9495 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 9496
34f80b04 9497 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 9498 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 9499 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 9500 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9501 bp->port.advertising = bp->port.supported;
a2fbb9ea 9502 } else {
c18487ee
YR
9503 u32 ext_phy_type =
9504 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9505
9506 if ((ext_phy_type ==
9507 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9508 (ext_phy_type ==
9509 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 9510 /* force 10G, no AN */
c18487ee 9511 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 9512 bp->port.advertising =
a2fbb9ea
ET
9513 (ADVERTISED_10000baseT_Full |
9514 ADVERTISED_FIBRE);
9515 break;
9516 }
9517 BNX2X_ERR("NVRAM config error. "
9518 "Invalid link_config 0x%x"
9519 " Autoneg not supported\n",
34f80b04 9520 bp->port.link_config);
a2fbb9ea
ET
9521 return;
9522 }
9523 break;
9524
9525 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 9526 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 9527 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
9528 bp->port.advertising = (ADVERTISED_10baseT_Full |
9529 ADVERTISED_TP);
a2fbb9ea 9530 } else {
cdaa7cb8
VZ
9531 BNX2X_ERROR("NVRAM config error. "
9532 "Invalid link_config 0x%x"
9533 " speed_cap_mask 0x%x\n",
9534 bp->port.link_config,
9535 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9536 return;
9537 }
9538 break;
9539
9540 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 9541 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
9542 bp->link_params.req_line_speed = SPEED_10;
9543 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9544 bp->port.advertising = (ADVERTISED_10baseT_Half |
9545 ADVERTISED_TP);
a2fbb9ea 9546 } else {
cdaa7cb8
VZ
9547 BNX2X_ERROR("NVRAM config error. "
9548 "Invalid link_config 0x%x"
9549 " speed_cap_mask 0x%x\n",
9550 bp->port.link_config,
9551 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9552 return;
9553 }
9554 break;
9555
9556 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 9557 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 9558 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
9559 bp->port.advertising = (ADVERTISED_100baseT_Full |
9560 ADVERTISED_TP);
a2fbb9ea 9561 } else {
cdaa7cb8
VZ
9562 BNX2X_ERROR("NVRAM config error. "
9563 "Invalid link_config 0x%x"
9564 " speed_cap_mask 0x%x\n",
9565 bp->port.link_config,
9566 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9567 return;
9568 }
9569 break;
9570
9571 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 9572 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
9573 bp->link_params.req_line_speed = SPEED_100;
9574 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
9575 bp->port.advertising = (ADVERTISED_100baseT_Half |
9576 ADVERTISED_TP);
a2fbb9ea 9577 } else {
cdaa7cb8
VZ
9578 BNX2X_ERROR("NVRAM config error. "
9579 "Invalid link_config 0x%x"
9580 " speed_cap_mask 0x%x\n",
9581 bp->port.link_config,
9582 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9583 return;
9584 }
9585 break;
9586
9587 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 9588 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 9589 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
9590 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9591 ADVERTISED_TP);
a2fbb9ea 9592 } else {
cdaa7cb8
VZ
9593 BNX2X_ERROR("NVRAM config error. "
9594 "Invalid link_config 0x%x"
9595 " speed_cap_mask 0x%x\n",
9596 bp->port.link_config,
9597 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9598 return;
9599 }
9600 break;
9601
9602 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 9603 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 9604 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
9605 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9606 ADVERTISED_TP);
a2fbb9ea 9607 } else {
cdaa7cb8
VZ
9608 BNX2X_ERROR("NVRAM config error. "
9609 "Invalid link_config 0x%x"
9610 " speed_cap_mask 0x%x\n",
9611 bp->port.link_config,
9612 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9613 return;
9614 }
9615 break;
9616
9617 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9618 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9619 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 9620 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 9621 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
9622 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9623 ADVERTISED_FIBRE);
a2fbb9ea 9624 } else {
cdaa7cb8
VZ
9625 BNX2X_ERROR("NVRAM config error. "
9626 "Invalid link_config 0x%x"
9627 " speed_cap_mask 0x%x\n",
9628 bp->port.link_config,
9629 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
9630 return;
9631 }
9632 break;
9633
9634 default:
cdaa7cb8
VZ
9635 BNX2X_ERROR("NVRAM config error. "
9636 "BAD link speed link_config 0x%x\n",
9637 bp->port.link_config);
c18487ee 9638 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 9639 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
9640 break;
9641 }
a2fbb9ea 9642
34f80b04
EG
9643 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9644 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 9645 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 9646 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 9647 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9648
c18487ee 9649 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 9650 " advertising 0x%x\n",
c18487ee
YR
9651 bp->link_params.req_line_speed,
9652 bp->link_params.req_duplex,
34f80b04 9653 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
9654}
9655
e665bfda
MC
9656static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9657{
9658 mac_hi = cpu_to_be16(mac_hi);
9659 mac_lo = cpu_to_be32(mac_lo);
9660 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9661 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9662}
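/*
 * Worked example for bnx2x_set_mac_buf(): mac_hi = 0x0010 and
 * mac_lo = 0x18010203 yield the buffer 00:10:18:01:02:03.  The
 * cpu_to_be*() conversions put the most significant byte first, so the
 * shmem "upper"/"lower" words map straight onto canonical MAC byte
 * order.
 */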
9663
34f80b04 9664static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 9665{
34f80b04
EG
9666 int port = BP_PORT(bp);
9667 u32 val, val2;
589abe3a 9668 u32 config;
c2c8b03e 9669 u16 i;
01cd4528 9670 u32 ext_phy_type;
a2fbb9ea 9671
c18487ee 9672 bp->link_params.bp = bp;
34f80b04 9673 bp->link_params.port = port;
c18487ee 9674
c18487ee 9675 bp->link_params.lane_config =
a2fbb9ea 9676 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 9677 bp->link_params.ext_phy_config =
a2fbb9ea
ET
9678 SHMEM_RD(bp,
9679 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
9680 /* BCM8727_NOC => BCM8727, no over-current */
9681 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9682 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9683 bp->link_params.ext_phy_config &=
9684 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9685 bp->link_params.ext_phy_config |=
9686 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9687 bp->link_params.feature_config_flags |=
9688 FEATURE_CONFIG_BCM8727_NOC;
9689 }
9690
c18487ee 9691 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
9692 SHMEM_RD(bp,
9693 dev_info.port_hw_config[port].speed_capability_mask);
9694
34f80b04 9695 bp->port.link_config =
a2fbb9ea
ET
9696 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9697
c2c8b03e
EG
9698 /* Get the XGXS config, rx and tx, for all 4 lanes */
9699 for (i = 0; i < 2; i++) {
9700 val = SHMEM_RD(bp,
9701 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9702 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9703 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9704
9705 val = SHMEM_RD(bp,
9706 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9707 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9708 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9709 }
9710
3ce2c3f9
EG
9711 /* If the device is capable of WoL, set the default state according
9712 * to the HW
9713 */
4d295db0 9714 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
9715 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9716 (config & PORT_FEATURE_WOL_ENABLED));
9717
c2c8b03e
EG
9718 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9719 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
9720 bp->link_params.lane_config,
9721 bp->link_params.ext_phy_config,
34f80b04 9722 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 9723
4d295db0
EG
9724 bp->link_params.switch_cfg |= (bp->port.link_config &
9725 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 9726 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
9727
9728 bnx2x_link_settings_requested(bp);
9729
01cd4528
EG
9730 /*
9731 * If connected directly, work with the internal PHY; otherwise, work
9732 * with the external PHY
9733 */
9734 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9735 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9736 bp->mdio.prtad = bp->link_params.phy_addr;
9737
9738 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9739 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9740 bp->mdio.prtad =
659bc5c4 9741 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 9742
a2fbb9ea
ET
9743 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9744 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 9745 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
9746 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9747 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
9748
9749#ifdef BCM_CNIC
9750 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9751 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9752 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9753#endif
34f80b04
EG
9754}
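/*
 * Note on bp->mdio.prtad above: a direct (internal) XGXS connection
 * targets MDIO at the internal PHY address, any working external PHY at
 * the address encoded in ext_phy_config; the FAILURE and NOT_CONN types
 * are skipped.
 */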
9755
9756static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9757{
9758 int func = BP_FUNC(bp);
9759 u32 val, val2;
9760 int rc = 0;
a2fbb9ea 9761
34f80b04 9762 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 9763
34f80b04
EG
9764 bp->e1hov = 0;
9765 bp->e1hmf = 0;
2145a920 9766 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
9767 bp->mf_config =
9768 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 9769
2691d51d 9770 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 9771 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 9772 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 9773 bp->e1hmf = 1;
2691d51d
EG
9774 BNX2X_DEV_INFO("%s function mode\n",
9775 IS_E1HMF(bp) ? "multi" : "single");
9776
9777 if (IS_E1HMF(bp)) {
9778 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9779 e1hov_tag) &
9780 FUNC_MF_CFG_E1HOV_TAG_MASK);
9781 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9782 bp->e1hov = val;
9783 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9784 "(0x%04x)\n",
9785 func, bp->e1hov, bp->e1hov);
9786 } else {
cdaa7cb8
VZ
9787 BNX2X_ERROR("No valid E1HOV for func %d,"
9788 " aborting\n", func);
34f80b04
EG
9789 rc = -EPERM;
9790 }
2691d51d
EG
9791 } else {
9792 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
9793 BNX2X_ERROR("VN %d in single function mode,"
9794 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
9795 rc = -EPERM;
9796 }
34f80b04
EG
9797 }
9798 }
a2fbb9ea 9799
34f80b04
EG
9800 if (!BP_NOMCP(bp)) {
9801 bnx2x_get_port_hwinfo(bp);
9802
9803 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9804 DRV_MSG_SEQ_NUMBER_MASK);
9805 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9806 }
9807
9808 if (IS_E1HMF(bp)) {
9809 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9810 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9811 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9812 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9813 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9814 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9815 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9816 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9817 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9818 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9819 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9820 ETH_ALEN);
9821 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9822 ETH_ALEN);
a2fbb9ea 9823 }
34f80b04
EG
9824
9825 return rc;
a2fbb9ea
ET
9826 }
9827
34f80b04
EG
9828 if (BP_NOMCP(bp)) {
9829 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 9830 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
9831 random_ether_addr(bp->dev->dev_addr);
9832 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9833 }
a2fbb9ea 9834
34f80b04
EG
9835 return rc;
9836}
9837
34f24c7f
VZ
9838static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9839{
9840 int cnt, i, block_end, rodi;
9841 char vpd_data[BNX2X_VPD_LEN+1];
9842 char str_id_reg[VENDOR_ID_LEN+1];
9843 char str_id_cap[VENDOR_ID_LEN+1];
9844 u8 len;
9845
9846 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9847 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9848
9849 if (cnt < BNX2X_VPD_LEN)
9850 goto out_not_found;
9851
9852 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9853 PCI_VPD_LRDT_RO_DATA);
9854 if (i < 0)
9855 goto out_not_found;
9856
9857
9858 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9859 pci_vpd_lrdt_size(&vpd_data[i]);
9860
9861 i += PCI_VPD_LRDT_TAG_SIZE;
9862
9863 if (block_end > BNX2X_VPD_LEN)
9864 goto out_not_found;
9865
9866 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9867 PCI_VPD_RO_KEYWORD_MFR_ID);
9868 if (rodi < 0)
9869 goto out_not_found;
9870
9871 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9872
9873 if (len != VENDOR_ID_LEN)
9874 goto out_not_found;
9875
9876 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9877
9878 /* vendor specific info */
9879 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9880 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9881 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9882 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9883
9884 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9885 PCI_VPD_RO_KEYWORD_VENDOR0);
9886 if (rodi >= 0) {
9887 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9888
9889 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9890
9891 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9892 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9893 bp->fw_ver[len] = ' ';
9894 }
9895 }
9896 return;
9897 }
9898out_not_found:
9899 return;
9900}
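/*
 * bnx2x_read_fwinfo() above walks the PCI VPD image: locate the
 * read-only large-resource tag, find the MFR_ID keyword, and only when
 * it matches the Dell vendor id copy the VENDOR0 keyword into
 * bp->fw_ver.  Every offset is bounds-checked against BNX2X_VPD_LEN
 * before it is dereferenced.
 */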
9901
34f80b04
EG
9902static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9903{
9904 int func = BP_FUNC(bp);
87942b46 9905 int timer_interval;
34f80b04
EG
9906 int rc;
9907
da5a662a
VZ
9908 /* Disable interrupt handling until HW is initialized */
9909 atomic_set(&bp->intr_sem, 1);
e1510706 9910 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 9911
34f80b04 9912 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 9913 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
9914#ifdef BCM_CNIC
9915 mutex_init(&bp->cnic_mutex);
9916#endif
a2fbb9ea 9917
1cf167f2 9918 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 9919 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
9920
9921 rc = bnx2x_get_hwinfo(bp);
9922
34f24c7f 9923 bnx2x_read_fwinfo(bp);
34f80b04
EG
9924 /* need to reset the chip if UNDI was active */
9925 if (!BP_NOMCP(bp))
9926 bnx2x_undi_unload(bp);
9927
9928 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 9929 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
9930
9931 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
9932 dev_err(&bp->pdev->dev, "MCP disabled, "
9933 "must load devices in order!\n");
34f80b04 9934
555f6c78 9935 /* Set multi queue mode */
8badd27a
EG
9936 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9937 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
9938 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9939 "requested is not MSI-X\n");
555f6c78
EG
9940 multi_mode = ETH_RSS_MODE_DISABLED;
9941 }
9942 bp->multi_mode = multi_mode;
9943
9944
4fd89b7a
DK
9945 bp->dev->features |= NETIF_F_GRO;
9946
7a9b2557
VZ
9947 /* Set TPA flags */
9948 if (disable_tpa) {
9949 bp->flags &= ~TPA_ENABLE_FLAG;
9950 bp->dev->features &= ~NETIF_F_LRO;
9951 } else {
9952 bp->flags |= TPA_ENABLE_FLAG;
9953 bp->dev->features |= NETIF_F_LRO;
9954 }
9955
a18f5128
EG
9956 if (CHIP_IS_E1(bp))
9957 bp->dropless_fc = 0;
9958 else
9959 bp->dropless_fc = dropless_fc;
9960
8d5726c4 9961 bp->mrrs = mrrs;
7a9b2557 9962
34f80b04
EG
9963 bp->tx_ring_size = MAX_TX_AVAIL;
9964 bp->rx_ring_size = MAX_RX_AVAIL;
9965
9966 bp->rx_csum = 1;
34f80b04 9967
7d323bfd
EG
9968 /* make sure that the numbers have the right granularity */
9969 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9970 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 9971
87942b46
EG
9972 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9973 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9974
9975 init_timer(&bp->timer);
9976 bp->timer.expires = jiffies + bp->current_interval;
9977 bp->timer.data = (unsigned long) bp;
9978 bp->timer.function = bnx2x_timer;
9979
9980 return rc;
a2fbb9ea
ET
9981}
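/*
 * The tick computation in bnx2x_init_bp() rounds the 50/25 us defaults
 * down to a multiple of 4 * BNX2X_BTR via integer division, e.g. if
 * 4 * BNX2X_BTR were 16 (illustrative value) then (50 / 16) * 16 = 48
 * for tx and (25 / 16) * 16 = 16 for rx.
 */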
9982
9983/*
9984 * ethtool service functions
9985 */
9986
9987/* All ethtool functions called with rtnl_lock */
9988
9989static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9990{
9991 struct bnx2x *bp = netdev_priv(dev);
9992
34f80b04
EG
9993 cmd->supported = bp->port.supported;
9994 cmd->advertising = bp->port.advertising;
a2fbb9ea 9995
f34d28ea
EG
9996 if ((bp->state == BNX2X_STATE_OPEN) &&
9997 !(bp->flags & MF_FUNC_DIS) &&
9998 (bp->link_vars.link_up)) {
c18487ee
YR
9999 cmd->speed = bp->link_vars.line_speed;
10000 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
10001 if (IS_E1HMF(bp)) {
10002 u16 vn_max_rate;
34f80b04 10003
b015e3d1
EG
10004 vn_max_rate =
10005 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 10006 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
10007 if (vn_max_rate < cmd->speed)
10008 cmd->speed = vn_max_rate;
10009 }
10010 } else {
10011 cmd->speed = -1;
10012 cmd->duplex = -1;
34f80b04 10013 }
a2fbb9ea 10014
c18487ee
YR
10015 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10016 u32 ext_phy_type =
10017 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
10018
10019 switch (ext_phy_type) {
10020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 10021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 10022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
10023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10024 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10025 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 10026 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
10027 cmd->port = PORT_FIBRE;
10028 break;
10029
10030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 10031 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
10032 cmd->port = PORT_TP;
10033 break;
10034
c18487ee
YR
10035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10036 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10037 bp->link_params.ext_phy_config);
10038 break;
10039
f1410647
ET
10040 default:
10041 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
10042 bp->link_params.ext_phy_config);
10043 break;
f1410647
ET
10044 }
10045 } else
a2fbb9ea 10046 cmd->port = PORT_TP;
a2fbb9ea 10047
01cd4528 10048 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
10049 cmd->transceiver = XCVR_INTERNAL;
10050
c18487ee 10051 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 10052 cmd->autoneg = AUTONEG_ENABLE;
f1410647 10053 else
a2fbb9ea 10054 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
10055
10056 cmd->maxtxpkt = 0;
10057 cmd->maxrxpkt = 0;
10058
10059 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10060 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10061 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10062 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10063 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10064 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10065 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10066
10067 return 0;
10068}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
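
/*
 * For reference, a hypothetical userspace invocation exercising the
 * forced-speed path of bnx2x_set_settings() above (interface name is
 * illustrative only):
 *
 *   ethtool -s eth0 speed 10000 duplex full autoneg off
 *
 * and one exercising the autoneg path:
 *
 *   ethtool -s eth0 autoneg on
 */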

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
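
/*
 * Sizing note for bnx2x_get_regs_len() above: plain registers contribute
 * their dword count directly, while each "write register" entry
 * contributes size * (1 + read_regs_count) dwords, since every write in
 * the dump is followed by read_regs_count dependent reads.  The running
 * total is kept in dwords, hence the final *4 to convert to bytes, plus
 * the dump header.
 */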

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
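
/*
 * Only magic-packet wake is exposed above.  A hypothetical userspace
 * example (interface name illustrative):
 *
 *   ethtool -s eth0 wol g    # enable magic-packet wake
 *   ethtool -s eth0 wol d    # disable wake-on-LAN
 */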

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
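
/*
 * NVRAM access protocol used by the helpers above: a caller must first
 * win the per-port software arbitration (bnx2x_acquire_nvram_lock), then
 * set the global access-enable bits (bnx2x_enable_nvram_access), perform
 * its dword transfers, and undo both steps in reverse order.  All the
 * read/write routines below follow this acquire/enable ... disable/release
 * bracket.
 */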

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
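
/*
 * Each dword transfer above is: clear DONE, program the address, set
 * DOIT (plus FIRST/LAST on the boundary dwords of a burst, supplied by
 * the caller in cmd_flags), then poll the command register until DONE.
 * The result is byteswapped with cpu_to_be32() so the caller's buffer
 * reads as the raw NVRAM byte stream regardless of host endianness.
 */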

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
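
/*
 * Worked example of the FIRST/LAST flag handling above: for an 8-byte
 * write at offset 0 with a 256-byte NVRAM_PAGE_SIZE, the first dword is
 * issued with MCPR_NVM_COMMAND_FIRST and the second with
 * MCPR_NVM_COMMAND_LAST (written_so_far == buf_size - 4).  A dword
 * ending exactly on a page boundary also gets LAST, and a dword opening
 * the next page gets FIRST, so each NVRAM page is programmed as its own
 * burst.
 */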

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
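
/*
 * The magic values accepted above drive the SFX7101 PHY firmware upgrade
 * sequence: 'PHYP' (0x50485950) prepares the PHY, 'PHYR' (0x50485952)
 * re-initializes the link afterwards, and 0x53985943 (labelled 'PHYC',
 * although ASCII "PHYC" would encode as 0x50485943, so this value also
 * falls outside the PMF-only 0x504859xx range checked earlier) finishes
 * the upgrade.  Any other magic is treated as a plain NVRAM write.  A
 * hypothetical invocation (arguments illustrative):
 *
 *   ethtool -E eth0 magic 0x50485950
 */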

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;

	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
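
/*
 * rx_ticks/tx_ticks are the only knobs honoured above; every other field
 * of struct ethtool_coalesce is ignored.  Hypothetical usage (values
 * illustrative, silently capped at BNX2X_MAX_COALESCE_TOUT):
 *
 *   ethtool -C eth0 rx-usecs 25 tx-usecs 50
 */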

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
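
/*
 * Flow-control resolution above: the RX/TX requests are OR'ed into
 * req_flow_ctrl, an empty request maps to BNX2X_FLOW_CTRL_NONE, and when
 * autoneg is requested (supported, and the line speed is auto) the mode
 * is set back to BNX2X_FLOW_CTRL_AUTO so the resolved mode comes from
 * link-partner negotiation.  Hypothetical usage:
 *
 *   ethtool -A eth0 autoneg on rx on tx on
 */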

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (data & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
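
/*
 * The register test above is a walking-pattern check: each table entry
 * is written first with 0x00000000 and then with 0xffffffff through its
 * RW mask, read back and compared, with the original value restored in
 * between; offset1 is the per-port stride added to the base offset for
 * port 1.
 */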

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
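
/*
 * Loopback flow above, in brief: build an MTU-bounded frame on queue 0,
 * post a start BD plus a parsing BD, ring the doorbell, then poll the
 * TX and RX consumer indices from the status block.  The received CQE
 * must carry no error flags and the payload must match the 0x00..0xff
 * fill pattern byte for byte before the test passes.
 */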

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (BP_NOMCP(bp))
		return rc;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	if (BP_NOMCP(bp))
		return 0;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
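
/*
 * The NVRAM test relies on the standard CRC-32 residue property: a block
 * checked together with its appended (complemented) CRC always yields
 * the constant 0xdebb20e3.  Each nvram_tbl region is therefore read in
 * full and its ether_crc_le() result compared against CRC32_RESIDUAL,
 * after the 0x669955aa bootstrap magic has been verified.
 */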

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
					8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
					8, "[%d]: tx_bcast_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
				8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
				8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
				8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
				8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
				8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
				8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
	(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
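
/*
 * Illustrative sketch (not compiled into the driver): the 8-byte
 * counters above live in two adjacent 32-bit words, high word first,
 * and HILO_U64() glues them back together. A minimal stand-alone
 * version of that composition, assuming the same hi/lo layout:
 */
#if 0
static u64 example_read_stat64(const u32 *hw_stat)
{
	u32 hi = hw_stat[0];	/* most significant 32 bits */
	u32 lo = hw_stat[1];	/* least significant 32 bits */

	return ((u64)hi << 32) | lo;	/* == HILO_U64(hi, lo) */
}
#endif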

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above, don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
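
/*
 * Illustrative sketch (not compiled): the D3hot path above programs the
 * PMCSR power-state field directly. The same bit manipulation in
 * isolation, assuming the PCI PM constants used above (bits 1:0 hold
 * the power state, value 3 = D3hot):
 */
#if 0
static u16 example_pmcsr_d3hot(u16 pmcsr, int wol)
{
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	/* clear power-state bits */
	pmcsr |= 3;				/* select D3hot */
	if (wol)
		pmcsr |= PCI_PM_CTRL_PME_ENABLE; /* arm PME for wake-on-LAN */
	return pmcsr;
}
#endif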

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
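
/*
 * Illustrative sketch (not compiled): the increment above exists
 * because the last slot of each completion-queue page is a next-page
 * pointer rather than a real completion, so a status-block index that
 * lands on it must be stepped past it before it is compared with the
 * driver's consumer index. The same normalization in isolation,
 * assuming the MAX_RCQ_DESC_CNT mask used above:
 */
#if 0
static u16 example_normalize_rcq_idx(u16 idx)
{
	if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		idx++;		/* skip the next-page BD slot */
	return idx;
}
#endif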

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
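
/*
 * Illustrative sketch (not compiled): bnx2x_csum_fix() compensates for
 * the gap between where the stack started its partial checksum and the
 * transport header the firmware expects. One's-complement checksums are
 * associative, so extra bytes in front of the transport header can be
 * subtracted back out (fix > 0) and missing ones added in (fix < 0).
 * A hypothetical caller, using the SKB_CS()/SKB_CS_OFF() accessors as
 * bnx2x_start_xmit() does further down:
 */
#if 0
static u16 example_fixup_udp_csum(struct sk_buff *skb)
{
	s8 fix = SKB_CS_OFF(skb);	/* signed offset difference */

	/* returns the adjusted checksum, byte-swapped for the parsing BD */
	return bnx2x_csum_fix(skb_transport_header(skb), SKB_CS(skb), fix);
}
#endif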

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
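
/*
 * Illustrative sketch (not compiled): the xmit_type value is a plain
 * bitmask, so consumers classify the packet with simple ANDs; the
 * XMIT_GSO and XMIT_CSUM composites tested here are the same ones
 * bnx2x_start_xmit() uses below:
 */
#if 0
static const char *example_describe_xmit_type(u32 xmit_type)
{
	if (xmit_type & XMIT_GSO)	/* TSO packet, v4 or v6 */
		return "LSO";
	if (xmit_type & XMIT_CSUM_TCP)	/* TCP checksum offload */
		return "TCP csum";
	if (xmit_type & XMIT_CSUM)	/* any L3/L4 csum offload */
		return "csum";
	return "plain";
}
#endif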

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
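
/*
 * Illustrative sketch (not compiled): the check above slides a window
 * of wnd_size fragments across the skb and flags the packet for
 * linearization if any window sums to less than one MSS, i.e. the
 * firmware could be forced to build a segment from more BDs than it
 * can fetch. A simplified version over a plain array of fragment sizes:
 */
#if 0
static int example_needs_linearization(const u32 *frag_sz, int nr_frags,
				       int wnd_size, u32 mss)
{
	u32 wnd_sum = 0;
	int i;

	for (i = 0; i < nr_frags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];	/* slide */
		if (i >= wnd_size - 1 && wnd_sum < mss)
			return 1;	/* window too small: linearize */
	}
	return 0;
}
#endif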

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
					ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
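
/*
 * Illustrative sketch (not compiled): BD accounting for one packet as
 * performed above -- a start BD, a parsing BD, one BD per page
 * fragment, an extra data BD when the TSO header is split, and one
 * more slot when the packet's BDs wrap over a ring page boundary
 * (whose last BD is a next-page pointer, as checked via TX_BD_POFF):
 */
#if 0
static int example_count_tx_bds(int nr_frags, int tso_split, int crosses_page)
{
	int nbd = nr_frags + 2;		/* start_bd + pbd + frags */

	if (tso_split)
		nbd++;			/* data BD added by bnx2x_tx_split */
	if (crosses_page)
		nbd++;			/* account for the next-page BD */
	return nbd;
}
#endif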

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mailbox sequence if there is ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that recovery is
			 * still pending. We don't check the attention state
			 * here because it may have already been cleared by a
			 * "common" reset but we shall proceed with
			 * "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
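
/*
 * Illustrative sketch (not compiled): the E1H path above approximates
 * multicast filtering with a 256-bit hash table spread across
 * MC_HASH_SIZE 32-bit registers. The bit position comes from the top
 * byte of a little-endian CRC32C of the MAC address, exactly as done
 * in the loop above:
 */
#if 0
static void example_mc_hash_set(u32 *mc_filter, const u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* 0..255: bit index in the table */
	u32 regidx = bit >> 5;		/* which 32-bit register */

	mc_filter[regidx] |= 1 << (bit & 0x1f);
}
#endif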

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
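
/*
 * Illustrative sketch (not compiled): the validation pattern used above
 * -- every (offset, len) pair read from an untrusted firmware header is
 * bounds-checked against the blob before any pointer into it is
 * dereferenced:
 */
#if 0
static int example_section_in_bounds(u32 offset, u32 len, size_t blob_size)
{
	/* both fields come from the file, so check the sum, not the parts */
	return (offset + len <= blob_size);
}
#endif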

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
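
/*
 * Illustrative sketch (not compiled): unpacking one 8-byte init-ops
 * entry exactly as bnx2x_prep_ops() does above -- the first big-endian
 * word packs an 8-bit opcode with a 24-bit offset, the second word
 * carries the data:
 */
#if 0
static void example_unpack_raw_op(const __be32 w[2], struct raw_op *op)
{
	u32 tmp = be32_to_cpu(w[0]);

	op->op = (tmp >> 24) & 0xff;	/* top byte: opcode */
	op->offset = tmp & 0xffffff;	/* low 24 bits: register offset */
	op->raw_data = be32_to_cpu(w[1]);
}
#endif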

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

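/*
 * CNIC glue: the hooks below expose the device's slow-path queue, IRQ and
 * context-table resources to the CNIC module, which layers offloaded
 * protocols (e.g. iSCSI) on top of this driver.
 */
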
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

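	/* Drain queued KWQEs onto the slow-path queue while SPQ credits,
	 * bounded by max_kwqe_pending, are available.
	 */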
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

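	/* Stage the KWQEs in the software ring, stopping once
	 * MAX_SP_DESC_CNT entries are pending; the return value tells
	 * the caller how many were accepted.
	 */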
	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

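/*
 * RCU-protected variant of bnx2x_cnic_ctl_send() for bottom-half context,
 * where sleeping on cnic_mutex is not allowed.
 */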
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

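	/* In MSI-X mode CNIC gets its own vector (entry 1 of the MSI-X
	 * table); otherwise it shares the device's single interrupt.
	 */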
	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

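	/* One page backs the KWQE staging ring; producer and consumer
	 * both start at its base.
	 */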
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
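	/* Wait for in-flight rcu_read_lock() users of cnic_ops to finish
	 * before the KWQE ring is freed.
	 */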
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

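/*
 * Entry point used by the CNIC module to discover this device and obtain
 * its resource map and callback table.
 */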
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */