/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
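
/*
 * Illustrative note (not in the original source): the request name is
 * assembled from the BCM_5710_FW_* macros in the firmware header, so with
 * a hypothetical 5.2.13.0 firmware FW_FILE_VERSION expands to "5.2.13.0"
 * and the E1 file requested at load time is "bnx2x-e1-5.2.13.0.fw".
 */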

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

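/*
 * Illustrative sketch (not in the original source): the pair above tunnels
 * GRC register accesses through PCI config space, so an init-time
 * read-modify-write would look like
 *
 *	u32 v = bnx2x_reg_rd_ind(bp, addr);
 *	bnx2x_reg_wr_ind(bp, addr, v | some_bit);
 *
 * where "addr" and "some_bit" are placeholders; restoring the window to
 * PCICFG_VENDOR_ID_OFFSET keeps ordinary config reads working afterwards.
 */
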
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

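/*
 * Illustrative arithmetic (not in the original source): "len" counts
 * 32-bit words while "offset" is in bytes, hence the "* 4" above.  With a
 * hypothetical dmae_wr_max of 0x400 words, a 0x500-word copy issues one
 * 0x400-word DMAE at offset 0 and a final 0x100-word DMAE at byte offset
 * 0x1000.
 */
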
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x) *sb_c_idx(0x%x)"
			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

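/*
 * Illustrative ordering note (not in the original source): teardown paths
 * are expected to call bnx2x_int_disable_sync(bp, 1), which first gates
 * new interrupts in software (intr_sem), then masks them in the HC, and
 * finally uses synchronize_irq() so no ISR is still running before the
 * rings are freed.
 */
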
/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

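/*
 * Illustrative sketch (not in the original source): unlike
 * bnx2x_acquire_hw_lock() below, the trylock variant makes a single
 * attempt, so a caller handles failure itself, e.g.:
 *
 *	if (bnx2x_trylock_hw_lock(bp, resource)) {
 *		... exclusive work ...
 *		bnx2x_release_hw_lock(bp, resource);
 *	}
 *
 * ("resource" stands for whichever HW_LOCK_RESOURCE_* value the caller
 * owns).
 */
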

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

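/*
 * Illustrative sketch (not in the original source): these two helpers are
 * used as a bracket around shared hardware, as the SPIO path below does:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	... program MISC_REG_SPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *
 * The acquire side polls for up to 5 seconds (1000 tries * 5 ms) before
 * giving up with -EAGAIN.
 */
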
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

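/*
 * Illustrative sketch (not in the original source): driving a pin from
 * link code would look like
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 *
 * where GPIO_1 is a placeholder pin; the mode constants are the three
 * handled in the switch above (OUTPUT_LOW, OUTPUT_HIGH, INPUT_HI_Z).
 */
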
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
		   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
		   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

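/*
 * Illustrative arithmetic (not in the original source): at 10G,
 * line_speed = 10000 (Mbps) so r_param = 1250 bytes/usec, and the
 * t_fair = T_FAIR_COEF / line_speed division yields the 1000 usec (10G)
 * and 10000 usec (1G) values quoted in the comment above.
 */
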
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

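/*
 * Illustrative decode (not in the original source): the MF config packs
 * bandwidth in units of 100 Mbps, so a MIN_BW field of 25 yields
 * vn_min_rate = 2500 Mbps, and the quota above then works out to
 * vn_max_rate * RS_PERIODIC_TIMEOUT_USEC / 8 bytes per period.
 */
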

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

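/*
 * Illustrative sketch (not in the original source): callers treat a zero
 * return as "MCP did not respond", e.g.:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (!rc)
 *		... report the firmware failure ...
 *
 * DRV_MSG_CODE_DCC_OK is one command this file already sends; any other
 * DRV_MSG_CODE_* value follows the same sequence-number handshake.
 */
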
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

1653static void bnx2x_update_min_max(struct bnx2x *bp)
1654{
1655 int port = BP_PORT(bp);
1656 int vn, i;
1657
1658 /* Init rate shaping and fairness contexts */
1659 bnx2x_init_port_minmax(bp);
1660
1661 bnx2x_calc_vn_weight_sum(bp);
1662
1663 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1664 bnx2x_init_vn_minmax(bp, 2*vn + port);
1665
1666 if (bp->port.pmf) {
1667 int func;
1668
1669 /* Set the attention towards other drivers on the same port */
1670 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1671 if (vn == BP_E1HVN(bp))
1672 continue;
1673
1674 func = ((vn << 1) | port);
1675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1676 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1677 }
1678
1679 /* Store it to internal memory */
1680 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1681 REG_WR(bp, BAR_XSTRORM_INTMEM +
1682 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1683 ((u32 *)(&bp->cmng))[i]);
1684 }
1685}
1686
1687static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1688{
2691d51d 1689 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
1690
1691 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1692
f34d28ea
EG
1693 /*
1694 * This is the only place besides the function initialization
1695 * where bp->flags can change, so it is done without any
1696 * locks
1697 */
1698 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1699 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 1700 bp->flags |= MF_FUNC_DIS;
1701
1702 bnx2x_e1h_disable(bp);
1703 } else {
1704 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 1705 bp->flags &= ~MF_FUNC_DIS;
1706
1707 bnx2x_e1h_enable(bp);
1708 }
1709 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1710 }
1711 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1712
1713 bnx2x_update_min_max(bp);
1714 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1715 }
1716
1717 /* Report results to MCP */
1718 if (dcc_event)
1719 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1720 else
1721 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1722}
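/*
 * Annotation (not from the original sources): each DCC sub-event that is
 * handled is cleared from dcc_event above, so any bits still set at the
 * end are unhandled and the MCP is answered with DCC_FAILURE; otherwise
 * the whole event is acked with DCC_OK.
 */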
1723
1724/* must be called under the spq lock */
1725static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1726{
1727 struct eth_spe *next_spe = bp->spq_prod_bd;
1728
1729 if (bp->spq_prod_bd == bp->spq_last_bd) {
1730 bp->spq_prod_bd = bp->spq;
1731 bp->spq_prod_idx = 0;
1732 DP(NETIF_MSG_TIMER, "end of spq\n");
1733 } else {
1734 bp->spq_prod_bd++;
1735 bp->spq_prod_idx++;
1736 }
1737 return next_spe;
1738}
1739
1740/* must be called under the spq lock */
1741static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1742{
1743 int func = BP_FUNC(bp);
1744
1745 /* Make sure that BD data is updated before writing the producer */
1746 wmb();
1747
1748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1749 bp->spq_prod_idx);
1750 mmiowb();
1751}
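/*
 * Annotation: the wmb() above orders the BD payload stores before the
 * producer write that exposes them to the chip, and mmiowb() keeps the
 * MMIO write ordered ahead of the spq_lock release as seen by other CPUs.
 */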
1752
a2fbb9ea 1753/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 1754int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1755 u32 data_hi, u32 data_lo, int common)
1756{
28912902 1757 struct eth_spe *spe;
a2fbb9ea 1758
1759#ifdef BNX2X_STOP_ON_ERROR
1760 if (unlikely(bp->panic))
1761 return -EIO;
1762#endif
1763
34f80b04 1764 spin_lock_bh(&bp->spq_lock);
1765
1766 if (!bp->spq_left) {
1767 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 1768 spin_unlock_bh(&bp->spq_lock);
1769 bnx2x_panic();
1770 return -EBUSY;
1771 }
f1410647 1772
1773 spe = bnx2x_sp_get_next(bp);
1774
a2fbb9ea 1775 /* CID needs port number to be encoded in it */
28912902 1776 spe->hdr.conn_and_cmd_data =
1777 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1778 HW_CID(bp, cid));
28912902 1779 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 1780 if (common)
28912902 1781 spe->hdr.type |=
1782 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1783
1784 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1785 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1786
1787 bp->spq_left--;
1788
1789 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1790 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1791 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1792 (u32)(U64_LO(bp->spq_mapping) +
1793 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1794 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1795
28912902 1796 bnx2x_sp_prod_update(bp);
34f80b04 1797 spin_unlock_bh(&bp->spq_lock);
1798 return 0;
1799}
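/*
 * Usage sketch (annotation, simplified relative to real callers): ramrods
 * posted here complete on the fastpath ring, which is why the queue is
 * "odd"; callers typically poll a state field that the completion
 * handler updates:
 *
 *	bnx2x_sp_post(bp, cmd, cid, U64_HI(mapping), U64_LO(mapping), 0);
 *	while (state != expected)	(real callers also time out)
 *		msleep(1);
 */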
1800
1801/* acquire split MCP access lock register */
4a37fb66 1802static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 1803{
72fd0718 1804 u32 j, val;
34f80b04 1805 int rc = 0;
1806
1807 might_sleep();
72fd0718 1808 for (j = 0; j < 1000; j++) {
1809 val = (1UL << 31);
1810 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1811 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1812 if (val & (1L << 31))
1813 break;
1814
1815 msleep(5);
1816 }
a2fbb9ea 1817 if (!(val & (1L << 31))) {
19680c48 1818 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1819 rc = -EBUSY;
1820 }
1821
1822 return rc;
1823}
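/*
 * Annotation: bit 31 of the register at GRCBASE_MCP + 0x9c acts as a
 * try-lock; reading it back as set means we own the lock.  The loop
 * above retries for up to ~5s (1000 iterations * 5ms).
 */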
1824
1825/* release split MCP access lock register */
1826static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 1827{
72fd0718 1828 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1829}
1830
1831static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1832{
1833 struct host_def_status_block *def_sb = bp->def_status_blk;
1834 u16 rc = 0;
1835
1836 barrier(); /* status block is written to by the chip */
1837 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1838 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1839 rc |= 1;
1840 }
1841 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1842 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1843 rc |= 2;
1844 }
1845 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1846 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1847 rc |= 4;
1848 }
1849 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1850 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1851 rc |= 8;
1852 }
1853 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1854 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1855 rc |= 16;
1856 }
1857 return rc;
1858}
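/*
 * Annotation: the return value is a bitmask of which default status block
 * indices changed since the last pass: bit 0 attention, bit 1 CSTORM,
 * bit 2 USTORM, bit 3 XSTORM, bit 4 TSTORM.
 */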
1859
1860/*
1861 * slow path service functions
1862 */
1863
1864static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1865{
34f80b04 1866 int port = BP_PORT(bp);
1867 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1868 COMMAND_REG_ATTN_BITS_SET);
1869 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1870 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1871 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1872 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 1873 u32 aeu_mask;
87942b46 1874 u32 nig_mask = 0;
a2fbb9ea 1875
1876 if (bp->attn_state & asserted)
1877 BNX2X_ERR("IGU ERROR\n");
1878
1879 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1880 aeu_mask = REG_RD(bp, aeu_addr);
1881
a2fbb9ea 1882 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 1883 aeu_mask, asserted);
72fd0718 1884 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 1885 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 1886
1887 REG_WR(bp, aeu_addr, aeu_mask);
1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 1889
3fcaf2e5 1890 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 1891 bp->attn_state |= asserted;
3fcaf2e5 1892 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1893
1894 if (asserted & ATTN_HARD_WIRED_MASK) {
1895 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 1896
1897 bnx2x_acquire_phy_lock(bp);
1898
877e9aa4 1899 /* save nig interrupt mask */
87942b46 1900 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 1901 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 1902
c18487ee 1903 bnx2x_link_attn(bp);
1904
1905 /* handle unicore attn? */
1906 }
1907 if (asserted & ATTN_SW_TIMER_4_FUNC)
1908 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1909
1910 if (asserted & GPIO_2_FUNC)
1911 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1912
1913 if (asserted & GPIO_3_FUNC)
1914 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1915
1916 if (asserted & GPIO_4_FUNC)
1917 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1918
1919 if (port == 0) {
1920 if (asserted & ATTN_GENERAL_ATTN_1) {
1921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1923 }
1924 if (asserted & ATTN_GENERAL_ATTN_2) {
1925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1927 }
1928 if (asserted & ATTN_GENERAL_ATTN_3) {
1929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1931 }
1932 } else {
1933 if (asserted & ATTN_GENERAL_ATTN_4) {
1934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1936 }
1937 if (asserted & ATTN_GENERAL_ATTN_5) {
1938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1940 }
1941 if (asserted & ATTN_GENERAL_ATTN_6) {
1942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1944 }
1945 }
1946
1947 } /* if hardwired */
1948
1949 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1950 asserted, hc_addr);
1951 REG_WR(bp, hc_addr, asserted);
1952
1953 /* now set back the mask */
a5e9a7cf 1954 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 1955 REG_WR(bp, nig_int_mask_addr, nig_mask);
1956 bnx2x_release_phy_lock(bp);
1957 }
1958}
1959
1960static inline void bnx2x_fan_failure(struct bnx2x *bp)
1961{
1962 int port = BP_PORT(bp);
b7737c9b 1963 u32 ext_phy_config;
fd4ef40d 1964 /* mark the failure */
1965 ext_phy_config =
1966 SHMEM_RD(bp,
1967 dev_info.port_hw_config[port].external_phy_config);
1968
1969 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1970 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 1971 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 1972 ext_phy_config);
1973
1974 /* log the failure */
1975 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1976 " the driver to shutdown the card to prevent permanent"
1977 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 1978}
ab6ad5a4 1979
877e9aa4 1980static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 1981{
34f80b04 1982 int port = BP_PORT(bp);
877e9aa4 1983 int reg_offset;
d90d96ba 1984 u32 val;
877e9aa4 1985
1986 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1987 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 1988
34f80b04 1989 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1990
1991 val = REG_RD(bp, reg_offset);
1992 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1993 REG_WR(bp, reg_offset, val);
1994
1995 BNX2X_ERR("SPIO5 hw attention\n");
1996
fd4ef40d 1997 /* Fan failure attention */
d90d96ba 1998 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 1999 bnx2x_fan_failure(bp);
877e9aa4 2000 }
34f80b04 2001
2002 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2003 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2004 bnx2x_acquire_phy_lock(bp);
2005 bnx2x_handle_module_detect_int(&bp->link_params);
2006 bnx2x_release_phy_lock(bp);
2007 }
2008
2009 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2010
2011 val = REG_RD(bp, reg_offset);
2012 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2013 REG_WR(bp, reg_offset, val);
2014
2015 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2016 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2017 bnx2x_panic();
2018 }
2019}
2020
2021static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2022{
2023 u32 val;
2024
0626b899 2025 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2026
2027 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2028 BNX2X_ERR("DB hw attention 0x%x\n", val);
2029 /* DORQ discard attention */
2030 if (val & 0x2)
2031 BNX2X_ERR("FATAL error from DORQ\n");
2032 }
2033
2034 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2035
2036 int port = BP_PORT(bp);
2037 int reg_offset;
2038
2039 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2040 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2041
2042 val = REG_RD(bp, reg_offset);
2043 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2044 REG_WR(bp, reg_offset, val);
2045
2046 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2047 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2048 bnx2x_panic();
2049 }
2050}
2051
2052static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2053{
2054 u32 val;
2055
2056 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2057
2058 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2059 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2060 /* CFC error attention */
2061 if (val & 0x2)
2062 BNX2X_ERR("FATAL error from CFC\n");
2063 }
2064
2065 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2066
2067 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2068 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2069 /* RQ_USDMDP_FIFO_OVERFLOW */
2070 if (val & 0x18000)
2071 BNX2X_ERR("FATAL error from PXP\n");
2072 }
2073
2074 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2075
2076 int port = BP_PORT(bp);
2077 int reg_offset;
2078
2079 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2080 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2081
2082 val = REG_RD(bp, reg_offset);
2083 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2084 REG_WR(bp, reg_offset, val);
2085
2086 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 2087 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2088 bnx2x_panic();
2089 }
2090}
2091
2092static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2093{
2094 u32 val;
2095
2096 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2097
2098 if (attn & BNX2X_PMF_LINK_ASSERT) {
2099 int func = BP_FUNC(bp);
2100
2101 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2102 bp->mf_config = SHMEM_RD(bp,
2103 mf_cfg.func_mf_config[func].config);
2104 val = SHMEM_RD(bp, func_mb[func].drv_status);
2105 if (val & DRV_STATUS_DCC_EVENT_MASK)
2106 bnx2x_dcc_event(bp,
2107 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 2108 bnx2x__link_status_update(bp);
2691d51d 2109 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2110 bnx2x_pmf_update(bp);
2111
2112 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2113
2114 BNX2X_ERR("MC assert!\n");
2115 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2116 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2117 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2118 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2119 bnx2x_panic();
2120
2121 } else if (attn & BNX2X_MCP_ASSERT) {
2122
2123 BNX2X_ERR("MCP assert!\n");
2124 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2125 bnx2x_fw_dump(bp);
2126
2127 } else
2128 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2129 }
2130
2131 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2132 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2133 if (attn & BNX2X_GRC_TIMEOUT) {
2134 val = CHIP_IS_E1H(bp) ?
2135 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2136 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2137 }
2138 if (attn & BNX2X_GRC_RSV) {
2139 val = CHIP_IS_E1H(bp) ?
2140 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2141 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2142 }
877e9aa4 2143 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2144 }
2145}
2146
2147#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2148#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2149#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2150#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2151#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2152#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
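/*
 * Layout note (annotation): BNX2X_MISC_GEN_REG is shared by all functions
 * on the chip.  Bits [15:0] count the loaded driver instances and bit 16
 * flags a reset in progress:
 *
 *	value = (reset_in_progress << RESET_DONE_FLAG_SHIFT) | load_count;
 */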
2153/*
2154 * should be run under rtnl lock
2155 */
2156static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2157{
2158 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2159 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2160 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2161 barrier();
2162 mmiowb();
2163}
2164
2165/*
2166 * should be run under rtnl lock
2167 */
2168static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2169{
2170 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2171 val |= (1 << RESET_DONE_FLAG_SHIFT);
2172 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2173 barrier();
2174 mmiowb();
2175}
2176
2177/*
2178 * should be run under rtnl lock
2179 */
9f6c9258 2180bool bnx2x_reset_is_done(struct bnx2x *bp)
2181{
2182 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2183 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2184 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2185}
2186
2187/*
2188 * should be run under rtnl lock
2189 */
9f6c9258 2190inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2191{
2192 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2193
2194 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2195
2196 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2197 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2198 barrier();
2199 mmiowb();
2200}
2201
2202/*
2203 * should be run under rtnl lock
2204 */
9f6c9258 2205u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2206{
2207 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2208
2209 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2210
2211 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2212 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2213 barrier();
2214 mmiowb();
2215
2216 return val1;
2217}
2218
2219/*
2220 * should be run under rtnl lock
2221 */
2222static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2223{
2224 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2225}
2226
2227static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2228{
2229 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2230 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2231}
2232
2233static inline void _print_next_block(int idx, const char *blk)
2234{
2235 if (idx)
2236 pr_cont(", ");
2237 pr_cont("%s", blk);
2238}
2239
2240static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2241{
2242 int i = 0;
2243 u32 cur_bit = 0;
2244 for (i = 0; sig; i++) {
2245 cur_bit = ((u32)0x1 << i);
2246 if (sig & cur_bit) {
2247 switch (cur_bit) {
2248 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2249 _print_next_block(par_num++, "BRB");
2250 break;
2251 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2252 _print_next_block(par_num++, "PARSER");
2253 break;
2254 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2255 _print_next_block(par_num++, "TSDM");
2256 break;
2257 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2258 _print_next_block(par_num++, "SEARCHER");
2259 break;
2260 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2261 _print_next_block(par_num++, "TSEMI");
2262 break;
2263 }
2264
2265 /* Clear the bit */
2266 sig &= ~cur_bit;
2267 }
2268 }
2269
2270 return par_num;
2271}
2272
2273static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2274{
2275 int i = 0;
2276 u32 cur_bit = 0;
2277 for (i = 0; sig; i++) {
2278 cur_bit = ((u32)0x1 << i);
2279 if (sig & cur_bit) {
2280 switch (cur_bit) {
2281 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2282 _print_next_block(par_num++, "PBCLIENT");
2283 break;
2284 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2285 _print_next_block(par_num++, "QM");
2286 break;
2287 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2288 _print_next_block(par_num++, "XSDM");
2289 break;
2290 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2291 _print_next_block(par_num++, "XSEMI");
2292 break;
2293 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2294 _print_next_block(par_num++, "DOORBELLQ");
2295 break;
2296 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2297 _print_next_block(par_num++, "VAUX PCI CORE");
2298 break;
2299 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2300 _print_next_block(par_num++, "DEBUG");
2301 break;
2302 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2303 _print_next_block(par_num++, "USDM");
2304 break;
2305 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2306 _print_next_block(par_num++, "USEMI");
2307 break;
2308 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2309 _print_next_block(par_num++, "UPB");
2310 break;
2311 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2312 _print_next_block(par_num++, "CSDM");
2313 break;
2314 }
2315
2316 /* Clear the bit */
2317 sig &= ~cur_bit;
2318 }
2319 }
2320
2321 return par_num;
2322}
2323
2324static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2325{
2326 int i = 0;
2327 u32 cur_bit = 0;
2328 for (i = 0; sig; i++) {
2329 cur_bit = ((u32)0x1 << i);
2330 if (sig & cur_bit) {
2331 switch (cur_bit) {
2332 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2333 _print_next_block(par_num++, "CSEMI");
2334 break;
2335 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2336 _print_next_block(par_num++, "PXP");
2337 break;
2338 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2339 _print_next_block(par_num++,
2340 "PXPPCICLOCKCLIENT");
2341 break;
2342 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2343 _print_next_block(par_num++, "CFC");
2344 break;
2345 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2346 _print_next_block(par_num++, "CDU");
2347 break;
2348 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2349 _print_next_block(par_num++, "IGU");
2350 break;
2351 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2352 _print_next_block(par_num++, "MISC");
2353 break;
2354 }
2355
2356 /* Clear the bit */
2357 sig &= ~cur_bit;
2358 }
2359 }
2360
2361 return par_num;
2362}
2363
2364static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2365{
2366 int i = 0;
2367 u32 cur_bit = 0;
2368 for (i = 0; sig; i++) {
2369 cur_bit = ((u32)0x1 << i);
2370 if (sig & cur_bit) {
2371 switch (cur_bit) {
2372 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2373 _print_next_block(par_num++, "MCP ROM");
2374 break;
2375 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2376 _print_next_block(par_num++, "MCP UMP RX");
2377 break;
2378 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2379 _print_next_block(par_num++, "MCP UMP TX");
2380 break;
2381 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2382 _print_next_block(par_num++, "MCP SCPAD");
2383 break;
2384 }
2385
2386 /* Clear the bit */
2387 sig &= ~cur_bit;
2388 }
2389 }
2390
2391 return par_num;
2392}
2393
2394static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2395 u32 sig2, u32 sig3)
2396{
2397 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2398 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2399 int par_num = 0;
2400 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2401 "[0]:0x%08x [1]:0x%08x "
2402 "[2]:0x%08x [3]:0x%08x\n",
2403 sig0 & HW_PRTY_ASSERT_SET_0,
2404 sig1 & HW_PRTY_ASSERT_SET_1,
2405 sig2 & HW_PRTY_ASSERT_SET_2,
2406 sig3 & HW_PRTY_ASSERT_SET_3);
2407 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2408 bp->dev->name);
2409 par_num = bnx2x_print_blocks_with_parity0(
2410 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2411 par_num = bnx2x_print_blocks_with_parity1(
2412 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2413 par_num = bnx2x_print_blocks_with_parity2(
2414 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2415 par_num = bnx2x_print_blocks_with_parity3(
2416 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2417 printk("\n");
2418 return true;
2419 } else
2420 return false;
2421}
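/*
 * Annotation: together with _print_next_block() the helpers above emit a
 * single line of the form (illustrative only):
 *
 *	eth0: Parity errors detected in blocks: BRB, QM, CFC
 */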
2422
9f6c9258 2423bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 2424{
a2fbb9ea 2425 struct attn_route attn;
2426 int port = BP_PORT(bp);
2427
2428 attn.sig[0] = REG_RD(bp,
2429 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2430 port*4);
2431 attn.sig[1] = REG_RD(bp,
2432 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2433 port*4);
2434 attn.sig[2] = REG_RD(bp,
2435 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2436 port*4);
2437 attn.sig[3] = REG_RD(bp,
2438 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2439 port*4);
2440
2441 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2442 attn.sig[3]);
2443}
2444
2445static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2446{
2447 struct attn_route attn, *group_mask;
34f80b04 2448 int port = BP_PORT(bp);
877e9aa4 2449 int index;
2450 u32 reg_addr;
2451 u32 val;
3fcaf2e5 2452 u32 aeu_mask;
2453
2454 /* need to take the HW lock because the MCP or the other port
2455 might also try to handle this event */
4a37fb66 2456 bnx2x_acquire_alr(bp);
a2fbb9ea 2457
2458 if (bnx2x_chk_parity_attn(bp)) {
2459 bp->recovery_state = BNX2X_RECOVERY_INIT;
2460 bnx2x_set_reset_in_progress(bp);
2461 schedule_delayed_work(&bp->reset_task, 0);
2462 /* Disable HW interrupts */
2463 bnx2x_int_disable(bp);
2464 bnx2x_release_alr(bp);
2465 /* In case of parity errors don't handle attentions so that
2466 * the other function would also "see" the parity errors.
2467 */
2468 return;
2469 }
2470
2471 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2472 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2473 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2474 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2475 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2476 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2477
2478 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2479 if (deasserted & (1 << index)) {
72fd0718 2480 group_mask = &bp->attn_group[index];
a2fbb9ea 2481
34f80b04 2482 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2483 index, group_mask->sig[0], group_mask->sig[1],
2484 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 2485
877e9aa4 2486 bnx2x_attn_int_deasserted3(bp,
72fd0718 2487 attn.sig[3] & group_mask->sig[3]);
877e9aa4 2488 bnx2x_attn_int_deasserted1(bp,
72fd0718 2489 attn.sig[1] & group_mask->sig[1]);
877e9aa4 2490 bnx2x_attn_int_deasserted2(bp,
72fd0718 2491 attn.sig[2] & group_mask->sig[2]);
877e9aa4 2492 bnx2x_attn_int_deasserted0(bp,
72fd0718 2493 attn.sig[0] & group_mask->sig[0]);
2494 }
2495 }
2496
4a37fb66 2497 bnx2x_release_alr(bp);
a2fbb9ea 2498
5c862848 2499 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2500
2501 val = ~deasserted;
2502 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2503 val, reg_addr);
5c862848 2504 REG_WR(bp, reg_addr, val);
a2fbb9ea 2505
a2fbb9ea 2506 if (~bp->attn_state & deasserted)
3fcaf2e5 2507 BNX2X_ERR("IGU ERROR\n");
2508
2509 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2510 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2511
2512 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2513 aeu_mask = REG_RD(bp, reg_addr);
2514
2515 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2516 aeu_mask, deasserted);
72fd0718 2517 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 2518 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2519
2520 REG_WR(bp, reg_addr, aeu_mask);
2521 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2522
2523 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2524 bp->attn_state &= ~deasserted;
2525 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2526}
2527
2528static void bnx2x_attn_int(struct bnx2x *bp)
2529{
2530 /* read local copy of bits */
2531 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2532 attn_bits);
2533 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2534 attn_bits_ack);
2535 u32 attn_state = bp->attn_state;
2536
2537 /* look for changed bits */
2538 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2539 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2540
2541 DP(NETIF_MSG_HW,
2542 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2543 attn_bits, attn_ack, asserted, deasserted);
2544
2545 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2546 BNX2X_ERR("BAD attention state\n");
2547
2548 /* handle bits that were raised */
2549 if (asserted)
2550 bnx2x_attn_int_asserted(bp, asserted);
2551
2552 if (deasserted)
2553 bnx2x_attn_int_deasserted(bp, deasserted);
2554}
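/*
 * Annotation: "asserted" are bits newly raised (set in attn_bits but not
 * yet acked or tracked in attn_state); "deasserted" are bits that were
 * tracked and acked but have since dropped from attn_bits.
 */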
2555
2556static void bnx2x_sp_task(struct work_struct *work)
2557{
1cf167f2 2558 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2559 u16 status;
2560
2561 /* Return here if interrupt is disabled */
2562 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2563 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2564 return;
2565 }
2566
2567 status = bnx2x_update_dsb_idx(bp);
2568/* if (status == 0) */
2569/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2570
cdaa7cb8 2571 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 2572
877e9aa4 2573 /* HW attentions */
cdaa7cb8 2574 if (status & 0x1) {
a2fbb9ea 2575 bnx2x_attn_int(bp);
2576 status &= ~0x1;
2577 }
2578
2579 /* CStorm events: STAT_QUERY */
2580 if (status & 0x2) {
2581 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2582 status &= ~0x2;
2583 }
2584
2585 if (unlikely(status))
2586 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2587 status);
a2fbb9ea 2588
68d59484 2589 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2590 IGU_INT_NOP, 1);
2591 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2592 IGU_INT_NOP, 1);
2593 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2594 IGU_INT_NOP, 1);
2595 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2596 IGU_INT_NOP, 1);
2597 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2598 IGU_INT_ENABLE, 1);
2599}
2600
9f6c9258 2601irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2602{
2603 struct net_device *dev = dev_instance;
2604 struct bnx2x *bp = netdev_priv(dev);
2605
2606 /* Return here if interrupt is disabled */
2607 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2608 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2609 return IRQ_HANDLED;
2610 }
2611
8d9c5f34 2612 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2613
2614#ifdef BNX2X_STOP_ON_ERROR
2615 if (unlikely(bp->panic))
2616 return IRQ_HANDLED;
2617#endif
2618
2619#ifdef BCM_CNIC
2620 {
2621 struct cnic_ops *c_ops;
2622
2623 rcu_read_lock();
2624 c_ops = rcu_dereference(bp->cnic_ops);
2625 if (c_ops)
2626 c_ops->cnic_handler(bp->cnic_data, NULL);
2627 rcu_read_unlock();
2628 }
2629#endif
1cf167f2 2630 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2631
2632 return IRQ_HANDLED;
2633}
2634
2635/* end of slow path */
2636
2637static void bnx2x_timer(unsigned long data)
2638{
2639 struct bnx2x *bp = (struct bnx2x *) data;
2640
2641 if (!netif_running(bp->dev))
2642 return;
2643
2644 if (atomic_read(&bp->intr_sem) != 0)
f1410647 2645 goto timer_restart;
2646
2647 if (poll) {
2648 struct bnx2x_fastpath *fp = &bp->fp[0];
2649 int rc;
2650
7961f791 2651 bnx2x_tx_int(fp);
2652 rc = bnx2x_rx_int(fp, 1000);
2653 }
2654
2655 if (!BP_NOMCP(bp)) {
2656 int func = BP_FUNC(bp);
2657 u32 drv_pulse;
2658 u32 mcp_pulse;
2659
2660 ++bp->fw_drv_pulse_wr_seq;
2661 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2662 /* TBD - add SYSTEM_TIME */
2663 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 2664 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 2665
34f80b04 2666 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2667 MCP_PULSE_SEQ_MASK);
2668 /* The delta between driver pulse and mcp response
2669 * should be 1 (before mcp response) or 0 (after mcp response)
2670 */
2671 if ((drv_pulse != mcp_pulse) &&
2672 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2673 /* someone lost a heartbeat... */
2674 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2675 drv_pulse, mcp_pulse);
2676 }
2677 }
2678
f34d28ea 2679 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 2680 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 2681
f1410647 2682timer_restart:
2683 mod_timer(&bp->timer, jiffies + bp->current_interval);
2684}
2685
2686/* end of Statistics */
2687
2688/* nic init */
2689
2690/*
2691 * nic init service functions
2692 */
2693
34f80b04 2694static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 2695{
2696 int port = BP_PORT(bp);
2697
2698 /* "CSTORM" */
2699 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2700 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2701 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2702 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2703 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2704 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2705}
2706
9f6c9258 2707void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5c862848 2708 dma_addr_t mapping, int sb_id)
2709{
2710 int port = BP_PORT(bp);
bb2a0f7a 2711 int func = BP_FUNC(bp);
a2fbb9ea 2712 int index;
34f80b04 2713 u64 section;
2714
2715 /* USTORM */
2716 section = ((u64)mapping) + offsetof(struct host_status_block,
2717 u_status_block);
34f80b04 2718 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 2719
2720 REG_WR(bp, BAR_CSTRORM_INTMEM +
2721 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2722 REG_WR(bp, BAR_CSTRORM_INTMEM +
2723 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 2724 U64_HI(section));
2725 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2726 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2727
2728 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2729 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2730 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2731
2732 /* CSTORM */
2733 section = ((u64)mapping) + offsetof(struct host_status_block,
2734 c_status_block);
34f80b04 2735 sb->c_status_block.status_block_id = sb_id;
2736
2737 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2738 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 2739 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2740 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 2741 U64_HI(section));
7a9b2557 2742 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 2743 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2744
2745 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2746 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 2747 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2748
2749 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2750}
2751
2752static void bnx2x_zero_def_sb(struct bnx2x *bp)
2753{
2754 int func = BP_FUNC(bp);
a2fbb9ea 2755
ca00392c 2756 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2757 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2758 sizeof(struct tstorm_def_status_block)/4);
2759 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2760 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2761 sizeof(struct cstorm_def_status_block_u)/4);
2762 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2763 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2764 sizeof(struct cstorm_def_status_block_c)/4);
2765 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2766 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2767 sizeof(struct xstorm_def_status_block)/4);
2768}
2769
2770static void bnx2x_init_def_sb(struct bnx2x *bp,
2771 struct host_def_status_block *def_sb,
34f80b04 2772 dma_addr_t mapping, int sb_id)
a2fbb9ea 2773{
2774 int port = BP_PORT(bp);
2775 int func = BP_FUNC(bp);
2776 int index, val, reg_offset;
2777 u64 section;
2778
2779 /* ATTN */
2780 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2781 atten_status_block);
34f80b04 2782 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 2783
2784 bp->attn_state = 0;
2785
2786 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2787 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2788
34f80b04 2789 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2790 bp->attn_group[index].sig[0] = REG_RD(bp,
2791 reg_offset + 0x10*index);
2792 bp->attn_group[index].sig[1] = REG_RD(bp,
2793 reg_offset + 0x4 + 0x10*index);
2794 bp->attn_group[index].sig[2] = REG_RD(bp,
2795 reg_offset + 0x8 + 0x10*index);
2796 bp->attn_group[index].sig[3] = REG_RD(bp,
2797 reg_offset + 0xc + 0x10*index);
2798 }
2799
2800 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2801 HC_REG_ATTN_MSG0_ADDR_L);
2802
2803 REG_WR(bp, reg_offset, U64_LO(section));
2804 REG_WR(bp, reg_offset + 4, U64_HI(section));
2805
2806 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2807
2808 val = REG_RD(bp, reg_offset);
34f80b04 2809 val |= sb_id;
2810 REG_WR(bp, reg_offset, val);
2811
2812 /* USTORM */
2813 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2814 u_def_status_block);
34f80b04 2815 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 2816
2817 REG_WR(bp, BAR_CSTRORM_INTMEM +
2818 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2819 REG_WR(bp, BAR_CSTRORM_INTMEM +
2820 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 2821 U64_HI(section));
2822 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2823 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2824
2825 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2826 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2827 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2828
2829 /* CSTORM */
2830 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2831 c_def_status_block);
34f80b04 2832 def_sb->c_def_status_block.status_block_id = sb_id;
2833
2834 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2835 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 2836 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2837 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 2838 U64_HI(section));
5c862848 2839 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 2840 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2841
2842 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2843 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 2844 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2845
2846 /* TSTORM */
2847 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2848 t_def_status_block);
34f80b04 2849 def_sb->t_def_status_block.status_block_id = sb_id;
2850
2851 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 2852 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2853 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 2854 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2855 U64_HI(section));
5c862848 2856 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 2857 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2858
2859 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2860 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 2861 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2862
2863 /* XSTORM */
2864 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2865 x_def_status_block);
34f80b04 2866 def_sb->x_def_status_block.status_block_id = sb_id;
2867
2868 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 2869 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2870 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 2871 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2872 U64_HI(section));
5c862848 2873 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 2874 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2875
2876 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2877 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 2878 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 2879
bb2a0f7a 2880 bp->stats_pending = 0;
66e855f3 2881 bp->set_mac_pending = 0;
bb2a0f7a 2882
34f80b04 2883 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2884}
2885
9f6c9258 2886void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 2887{
34f80b04 2888 int port = BP_PORT(bp);
2889 int i;
2890
2891 for_each_queue(bp, i) {
34f80b04 2892 int sb_id = bp->fp[i].sb_id;
2893
2894 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2895 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2896 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2897 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 2898 bp->rx_ticks/(4 * BNX2X_BTR));
2899 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2900 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2901 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 2902 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2903
2904 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2905 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2906 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2907 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 2908 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 2909 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2910 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2911 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 2912 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2913 }
2914}
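/*
 * Annotation: the HC timeout fields are written in units of
 * 4 * BNX2X_BTR ticks; when rx_ticks/tx_ticks are below one unit the
 * quotient is 0 and the paired "disable" field is set to 1, turning
 * coalescing off for that status block index.
 */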
2915
2916static void bnx2x_init_sp_ring(struct bnx2x *bp)
2917{
34f80b04 2918 int func = BP_FUNC(bp);
2919
2920 spin_lock_init(&bp->spq_lock);
2921
2922 bp->spq_left = MAX_SPQ_PENDING;
2923 bp->spq_prod_idx = 0;
2924 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2925 bp->spq_prod_bd = bp->spq;
2926 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2927
34f80b04 2928 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 2929 U64_LO(bp->spq_mapping));
2930 REG_WR(bp,
2931 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2932 U64_HI(bp->spq_mapping));
2933
34f80b04 2934 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2935 bp->spq_prod_idx);
2936}
2937
2938static void bnx2x_init_context(struct bnx2x *bp)
2939{
2940 int i;
2941
2942 /* Rx */
2943 for_each_queue(bp, i) {
2944 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2945 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 2946 u8 cl_id = fp->cl_id;
a2fbb9ea 2947
2948 context->ustorm_st_context.common.sb_index_numbers =
2949 BNX2X_RX_SB_INDEX_NUM;
0626b899 2950 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 2951 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 2952 context->ustorm_st_context.common.flags =
2953 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2954 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2955 context->ustorm_st_context.common.statistics_counter_id =
2956 cl_id;
8d9c5f34 2957 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 2958 BNX2X_RX_ALIGN_SHIFT;
34f80b04 2959 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 2960 bp->rx_buf_size;
34f80b04 2961 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 2962 U64_HI(fp->rx_desc_mapping);
34f80b04 2963 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 2964 U64_LO(fp->rx_desc_mapping);
2965 if (!fp->disable_tpa) {
2966 context->ustorm_st_context.common.flags |=
ca00392c 2967 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 2968 context->ustorm_st_context.common.sge_buff_size =
2969 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2970 0xffff);
2971 context->ustorm_st_context.common.sge_page_base_hi =
2972 U64_HI(fp->rx_sge_mapping);
2973 context->ustorm_st_context.common.sge_page_base_lo =
2974 U64_LO(fp->rx_sge_mapping);
2975
2976 context->ustorm_st_context.common.max_sges_for_packet =
2977 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2978 context->ustorm_st_context.common.max_sges_for_packet =
2979 ((context->ustorm_st_context.common.
2980 max_sges_for_packet + PAGES_PER_SGE - 1) &
2981 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
2982 }
2983
2984 context->ustorm_ag_context.cdu_usage =
2985 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2986 CDU_REGION_NUMBER_UCM_AG,
2987 ETH_CONNECTION_TYPE);
2988
2989 context->xstorm_ag_context.cdu_reserved =
2990 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2991 CDU_REGION_NUMBER_XCM_AG,
2992 ETH_CONNECTION_TYPE);
2993 }
2994
2995 /* Tx */
2996 for_each_queue(bp, i) {
2997 struct bnx2x_fastpath *fp = &bp->fp[i];
2998 struct eth_context *context =
54b9ddaa 2999 bnx2x_sp(bp, context[i].eth);
3000
3001 context->cstorm_st_context.sb_index_number =
3002 C_SB_ETH_TX_CQ_INDEX;
3003 context->cstorm_st_context.status_block_id = fp->sb_id;
3004
3005 context->xstorm_st_context.tx_bd_page_base_hi =
3006 U64_HI(fp->tx_desc_mapping);
3007 context->xstorm_st_context.tx_bd_page_base_lo =
3008 U64_LO(fp->tx_desc_mapping);
ca00392c 3009 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 3010 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3011 }
3012}
3013
3014static void bnx2x_init_ind_table(struct bnx2x *bp)
3015{
26c8fa4d 3016 int func = BP_FUNC(bp);
3017 int i;
3018
555f6c78 3019 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3020 return;
3021
3022 DP(NETIF_MSG_IFUP,
3023 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 3024 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 3025 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 3026 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 3027 bp->fp->cl_id + (i % bp->num_queues));
3028}
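/*
 * Annotation: each of the TSTORM_INDIRECTION_TABLE_SIZE entries maps an
 * RSS hash bucket to a client id; buckets are spread round-robin over the
 * active queues via cl_id + (i % num_queues).
 */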
3029
9f6c9258 3030void bnx2x_set_client_config(struct bnx2x *bp)
49d66772 3031{
49d66772 3032 struct tstorm_eth_client_config tstorm_client = {0};
3033 int port = BP_PORT(bp);
3034 int i;
49d66772 3035
e7799c5f 3036 tstorm_client.mtu = bp->dev->mtu;
49d66772 3037 tstorm_client.config_flags =
3038 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3039 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 3040#ifdef BCM_VLAN
0c6671b0 3041 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 3042 tstorm_client.config_flags |=
8d9c5f34 3043 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3044 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3045 }
3046#endif
3047
3048 for_each_queue(bp, i) {
3049 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3050
49d66772 3051 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3052 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3053 ((u32 *)&tstorm_client)[0]);
3054 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3055 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3056 ((u32 *)&tstorm_client)[1]);
3057 }
3058
3059 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3060 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3061}
3062
9f6c9258 3063void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 3064{
a2fbb9ea 3065 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 3066 int mode = bp->rx_mode;
37b091ba 3067 int mask = bp->rx_mode_cl_mask;
34f80b04 3068 int func = BP_FUNC(bp);
581ce43d 3069 int port = BP_PORT(bp);
a2fbb9ea 3070 int i;
3071 /* All but management unicast packets should pass to the host as well */
3072 u32 llh_mask =
3073 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3074 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3075 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3076 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 3077
3196a88a 3078 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3079
3080 switch (mode) {
3081 case BNX2X_RX_MODE_NONE: /* no Rx */
3082 tstorm_mac_filter.ucast_drop_all = mask;
3083 tstorm_mac_filter.mcast_drop_all = mask;
3084 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 3085 break;
356e2385 3086
a2fbb9ea 3087 case BNX2X_RX_MODE_NORMAL:
34f80b04 3088 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 3089 break;
356e2385 3090
a2fbb9ea 3091 case BNX2X_RX_MODE_ALLMULTI:
3092 tstorm_mac_filter.mcast_accept_all = mask;
3093 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 3094 break;
356e2385 3095
a2fbb9ea 3096 case BNX2X_RX_MODE_PROMISC:
3097 tstorm_mac_filter.ucast_accept_all = mask;
3098 tstorm_mac_filter.mcast_accept_all = mask;
3099 tstorm_mac_filter.bcast_accept_all = mask;
3100 /* pass management unicast packets as well */
3101 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 3102 break;
356e2385 3103
a2fbb9ea 3104 default:
3105 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3106 break;
3107 }
3108
3109 REG_WR(bp,
3110 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3111 llh_mask);
3112
3113 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3114 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3115 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3116 ((u32 *)&tstorm_mac_filter)[i]);
3117
34f80b04 3118/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3119 ((u32 *)&tstorm_mac_filter)[i]); */
3120 }
a2fbb9ea 3121
3122 if (mode != BNX2X_RX_MODE_NONE)
3123 bnx2x_set_client_config(bp);
3124}
3125
3126static void bnx2x_init_internal_common(struct bnx2x *bp)
3127{
3128 int i;
3129
3130 /* Zero this manually as its initialization is
3131 currently missing in the initTool */
3132 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3133 REG_WR(bp, BAR_USTRORM_INTMEM +
3134 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3135}
3136
3137static void bnx2x_init_internal_port(struct bnx2x *bp)
3138{
3139 int port = BP_PORT(bp);
3140
3141 REG_WR(bp,
3142 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3143 REG_WR(bp,
3144 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3145 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3146 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3147}
3148
3149static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 3150{
3151 struct tstorm_eth_function_common_config tstorm_config = {0};
3152 struct stats_indication_flags stats_flags = {0};
3153 int port = BP_PORT(bp);
3154 int func = BP_FUNC(bp);
3155 int i, j;
3156 u32 offset;
471de716 3157 u16 max_agg_size;
a2fbb9ea 3158
3159 tstorm_config.config_flags = RSS_FLAGS(bp);
3160
3161 if (is_multi(bp))
a2fbb9ea 3162 tstorm_config.rss_result_mask = MULTI_MASK;
3163
3164 /* Enable TPA if needed */
3165 if (bp->flags & TPA_ENABLE_FLAG)
3166 tstorm_config.config_flags |=
3167 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3168
3169 if (IS_E1HMF(bp))
3170 tstorm_config.config_flags |=
3171 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 3172
3173 tstorm_config.leading_client_id = BP_L_ID(bp);
3174
a2fbb9ea 3175 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3176 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3177 (*(u32 *)&tstorm_config));
3178
c14423fe 3179 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 3180 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3181 bnx2x_set_storm_rx_mode(bp);
3182
3183 for_each_queue(bp, i) {
3184 u8 cl_id = bp->fp[i].cl_id;
3185
3186 /* reset xstorm per client statistics */
3187 offset = BAR_XSTRORM_INTMEM +
3188 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3189 for (j = 0;
3190 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3191 REG_WR(bp, offset + j*4, 0);
3192
3193 /* reset tstorm per client statistics */
3194 offset = BAR_TSTRORM_INTMEM +
3195 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3196 for (j = 0;
3197 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3198 REG_WR(bp, offset + j*4, 0);
3199
3200 /* reset ustorm per client statistics */
3201 offset = BAR_USTRORM_INTMEM +
3202 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3203 for (j = 0;
3204 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3205 REG_WR(bp, offset + j*4, 0);
3206 }
3207
3208 /* Init statistics related context */
34f80b04 3209 stats_flags.collect_eth = 1;
a2fbb9ea 3210
66e855f3 3211 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3212 ((u32 *)&stats_flags)[0]);
66e855f3 3213 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3214 ((u32 *)&stats_flags)[1]);
3215
66e855f3 3216 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3217 ((u32 *)&stats_flags)[0]);
66e855f3 3218 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3219 ((u32 *)&stats_flags)[1]);
3220
3221 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3222 ((u32 *)&stats_flags)[0]);
3223 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3224 ((u32 *)&stats_flags)[1]);
3225
66e855f3 3226 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3227 ((u32 *)&stats_flags)[0]);
66e855f3 3228 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3229 ((u32 *)&stats_flags)[1]);
3230
3231 REG_WR(bp, BAR_XSTRORM_INTMEM +
3232 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3233 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3234 REG_WR(bp, BAR_XSTRORM_INTMEM +
3235 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3236 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3237
3238 REG_WR(bp, BAR_TSTRORM_INTMEM +
3239 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3240 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3241 REG_WR(bp, BAR_TSTRORM_INTMEM +
3242 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3243 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 3244
3245 REG_WR(bp, BAR_USTRORM_INTMEM +
3246 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3247 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3248 REG_WR(bp, BAR_USTRORM_INTMEM +
3249 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3250 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3251
3252 if (CHIP_IS_E1H(bp)) {
3253 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3254 IS_E1HMF(bp));
3255 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3256 IS_E1HMF(bp));
3257 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3258 IS_E1HMF(bp));
3259 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3260 IS_E1HMF(bp));
3261
3262 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3263 bp->e1hov);
3264 }
3265
4f40f2cb 3266 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
3267 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3268 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
54b9ddaa 3269 for_each_queue(bp, i) {
7a9b2557 3270 struct bnx2x_fastpath *fp = &bp->fp[i];
3271
3272 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 3273 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3274 U64_LO(fp->rx_comp_mapping));
3275 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 3276 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3277 U64_HI(fp->rx_comp_mapping));
3278
3279 /* Next page */
3280 REG_WR(bp, BAR_USTRORM_INTMEM +
3281 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3282 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3283 REG_WR(bp, BAR_USTRORM_INTMEM +
3284 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3285 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3286
7a9b2557 3287 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 3288 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3289 max_agg_size);
3290 }
8a1c38d1 3291
3292 /* dropless flow control */
3293 if (CHIP_IS_E1H(bp)) {
3294 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3295
3296 rx_pause.bd_thr_low = 250;
3297 rx_pause.cqe_thr_low = 250;
3298 rx_pause.cos = 1;
3299 rx_pause.sge_thr_low = 0;
3300 rx_pause.bd_thr_high = 350;
3301 rx_pause.cqe_thr_high = 350;
3302 rx_pause.sge_thr_high = 0;
3303
54b9ddaa 3304 for_each_queue(bp, i) {
3305 struct bnx2x_fastpath *fp = &bp->fp[i];
3306
3307 if (!fp->disable_tpa) {
3308 rx_pause.sge_thr_low = 150;
3309 rx_pause.sge_thr_high = 250;
3310 }
3311
3312
3313 offset = BAR_USTRORM_INTMEM +
3314 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3315 fp->cl_id);
3316 for (j = 0;
3317 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3318 j++)
3319 REG_WR(bp, offset + j*4,
3320 ((u32 *)&rx_pause)[j]);
3321 }
3322 }
3323
3324 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3325
3326 /* Init rate shaping and fairness contexts */
3327 if (IS_E1HMF(bp)) {
3328 int vn;
3329
3330 /* During init there is no active link;
3331 until the link is up, set the link rate to 10Gbps */
3332 bp->link_vars.line_speed = SPEED_10000;
3333 bnx2x_init_port_minmax(bp);
3334
3335 if (!BP_NOMCP(bp))
3336 bp->mf_config =
3337 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3338 bnx2x_calc_vn_weight_sum(bp);
3339
3340 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3341 bnx2x_init_vn_minmax(bp, 2*vn + port);
3342
3343 /* Enable rate shaping and fairness */
b015e3d1 3344 bp->cmng.flags.cmng_enables |=
8a1c38d1 3345 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 3346
3347 } else {
3348 /* rate shaping and fairness are disabled */
3349 DP(NETIF_MSG_IFUP,
3350 "single function mode minmax will be disabled\n");
3351 }
3352
3353
cdaa7cb8 3354 /* Store cmng structures to internal memory */
8a1c38d1
EG
3355 if (bp->port.pmf)
3356 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3357 REG_WR(bp, BAR_XSTRORM_INTMEM +
3358 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3359 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
3360}
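The word-by-word copies of rx_pause and bp->cmng into storm internal memory above follow the same pattern; a minimal sketch of a shared helper, assuming the struct size is a multiple of 4 bytes (bnx2x_wr_intmem_struct is a hypothetical name, not a driver function):

static void bnx2x_wr_intmem_struct(struct bnx2x *bp, u32 offset,
				   const void *data, size_t size)
{
	const u32 *words = data;
	size_t i;

	/* storm internal memory is written as consecutive 32-bit words */
	for (i = 0; i < size / 4; i++)
		REG_WR(bp, offset + i * 4, words[i]);
}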
3361
471de716
EG
3362static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3363{
3364 switch (load_code) {
3365 case FW_MSG_CODE_DRV_LOAD_COMMON:
3366 bnx2x_init_internal_common(bp);
3367 /* no break */
3368
3369 case FW_MSG_CODE_DRV_LOAD_PORT:
3370 bnx2x_init_internal_port(bp);
3371 /* no break */
3372
3373 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3374 bnx2x_init_internal_func(bp);
3375 break;
3376
3377 default:
3378 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3379 break;
3380 }
3381}
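The /* no break */ markers above mark deliberate fallthrough: a COMMON load also runs the PORT and FUNCTION stages, and a PORT load also runs the FUNCTION stage. Unrolled for clarity (an equivalent sketch, not driver code):

	/* equivalent logic to the fallthrough switch above */
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
		bnx2x_init_internal_common(bp);
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON ||
	    load_code == FW_MSG_CODE_DRV_LOAD_PORT)
		bnx2x_init_internal_port(bp);
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON ||
	    load_code == FW_MSG_CODE_DRV_LOAD_PORT ||
	    load_code == FW_MSG_CODE_DRV_LOAD_FUNCTION)
		bnx2x_init_internal_func(bp);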
3382
9f6c9258 3383void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
3384{
3385 int i;
3386
3387 for_each_queue(bp, i) {
3388 struct bnx2x_fastpath *fp = &bp->fp[i];
3389
34f80b04 3390 fp->bp = bp;
a2fbb9ea 3391 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 3392 fp->index = i;
34f80b04 3393 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
3394#ifdef BCM_CNIC
3395 fp->sb_id = fp->cl_id + 1;
3396#else
34f80b04 3397 fp->sb_id = fp->cl_id;
37b091ba 3398#endif
34f80b04 3399 DP(NETIF_MSG_IFUP,
f5372251
EG
3400 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3401 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 3402 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 3403 fp->sb_id);
5c862848 3404 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
3405 }
3406
16119785
EG
3407 /* ensure status block indices were read */
3408 rmb();
3409
3410
5c862848
EG
3411 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3412 DEF_SB_ID);
3413 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
3414 bnx2x_update_coalesce(bp);
3415 bnx2x_init_rx_rings(bp);
3416 bnx2x_init_tx_ring(bp);
3417 bnx2x_init_sp_ring(bp);
3418 bnx2x_init_context(bp);
471de716 3419 bnx2x_init_internal(bp, load_code);
a2fbb9ea 3420 bnx2x_init_ind_table(bp);
0ef00459
EG
3421 bnx2x_stats_init(bp);
3422
3423 /* At this point, we are ready for interrupts */
3424 atomic_set(&bp->intr_sem, 0);
3425
3426 /* flush all before enabling interrupts */
3427 mb();
3428 mmiowb();
3429
615f8fd9 3430 bnx2x_int_enable(bp);
eb8da205
EG
3431
3432 /* Check for SPIO5 */
3433 bnx2x_attn_int_deasserted0(bp,
3434 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3435 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
3436}
3437
3438/* end of nic init */
3439
3440/*
3441 * gzip service functions
3442 */
3443
3444static int bnx2x_gunzip_init(struct bnx2x *bp)
3445{
1a983142
FT
3446 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3447 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
3448 if (bp->gunzip_buf == NULL)
3449 goto gunzip_nomem1;
3450
3451 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3452 if (bp->strm == NULL)
3453 goto gunzip_nomem2;
3454
3455 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3456 GFP_KERNEL);
3457 if (bp->strm->workspace == NULL)
3458 goto gunzip_nomem3;
3459
3460 return 0;
3461
3462gunzip_nomem3:
3463 kfree(bp->strm);
3464 bp->strm = NULL;
3465
3466gunzip_nomem2:
1a983142
FT
3467 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3468 bp->gunzip_mapping);
a2fbb9ea
ET
3469 bp->gunzip_buf = NULL;
3470
3471gunzip_nomem1:
cdaa7cb8
VZ
 3472 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
 3473 " decompression\n");
a2fbb9ea
ET
3474 return -ENOMEM;
3475}
3476
3477static void bnx2x_gunzip_end(struct bnx2x *bp)
3478{
3479 kfree(bp->strm->workspace);
3480
3481 kfree(bp->strm);
3482 bp->strm = NULL;
3483
3484 if (bp->gunzip_buf) {
1a983142
FT
3485 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3486 bp->gunzip_mapping);
a2fbb9ea
ET
3487 bp->gunzip_buf = NULL;
3488 }
3489}
3490
94a78b79 3491static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
3492{
3493 int n, rc;
3494
3495 /* check gzip header */
94a78b79
VZ
3496 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3497 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 3498 return -EINVAL;
94a78b79 3499 }
a2fbb9ea
ET
3500
3501 n = 10;
3502
34f80b04 3503#define FNAME 0x8
a2fbb9ea
ET
3504
3505 if (zbuf[3] & FNAME)
3506 while ((zbuf[n++] != 0) && (n < len));
3507
94a78b79 3508 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
3509 bp->strm->avail_in = len - n;
3510 bp->strm->next_out = bp->gunzip_buf;
3511 bp->strm->avail_out = FW_BUF_SIZE;
3512
3513 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3514 if (rc != Z_OK)
3515 return rc;
3516
3517 rc = zlib_inflate(bp->strm, Z_FINISH);
3518 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
3519 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3520 bp->strm->msg);
a2fbb9ea
ET
3521
3522 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3523 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
3524 netdev_err(bp->dev, "Firmware decompression error:"
3525 " gunzip_outlen (%d) not aligned\n",
3526 bp->gunzip_outlen);
a2fbb9ea
ET
3527 bp->gunzip_outlen >>= 2;
3528
3529 zlib_inflateEnd(bp->strm);
3530
3531 if (rc == Z_STREAM_END)
3532 return 0;
3533
3534 return rc;
3535}
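bnx2x_gunzip() above hand-parses the gzip member header (RFC 1952): a 10-byte fixed part -- magic 0x1f 0x8b, CM = 8 (deflate), FLG, 4-byte MTIME, XFL, OS -- optionally followed by extra fields, of which the driver only handles FNAME. A standalone sketch of that offset computation (gzip_payload_offset is a hypothetical helper, not driver code):

#include <stddef.h>
#include <stdint.h>

/* Return the offset of the raw deflate stream inside a gzip member,
 * or -1 on a bad header.  Mirrors the check in bnx2x_gunzip(): only
 * the FNAME flag (0x08) is handled; FEXTRA/FCOMMENT/FHCRC would need
 * extra handling (the firmware images are assumed never to set them).
 */
static int gzip_payload_offset(const uint8_t *zbuf, size_t len)
{
	size_t n = 10;			/* fixed gzip header, RFC 1952 */

	if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 8)
		return -1;		/* bad magic, or not deflate */

	if (zbuf[3] & 0x08)		/* FNAME: skip NUL-terminated name */
		while (n < len && zbuf[n++] != 0)
			;

	return (int)n;
}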
3536
3537/* nic load/unload */
3538
3539/*
34f80b04 3540 * General service functions
a2fbb9ea
ET
3541 */
3542
3543/* send a NIG loopback debug packet */
3544static void bnx2x_lb_pckt(struct bnx2x *bp)
3545{
a2fbb9ea 3546 u32 wb_write[3];
a2fbb9ea
ET
3547
3548 /* Ethernet source and destination addresses */
a2fbb9ea
ET
3549 wb_write[0] = 0x55555555;
3550 wb_write[1] = 0x55555555;
34f80b04 3551 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 3552 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3553
3554 /* NON-IP protocol */
a2fbb9ea
ET
3555 wb_write[0] = 0x09000000;
3556 wb_write[1] = 0x55555555;
34f80b04 3557 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 3558 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3559}
3560
 3561/* some of the internal memories
 3562 * are not directly readable from the driver;
 3563 * to test them, we send debug packets
 3564 */
3565static int bnx2x_int_mem_test(struct bnx2x *bp)
3566{
3567 int factor;
3568 int count, i;
3569 u32 val = 0;
3570
ad8d3948 3571 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 3572 factor = 120;
ad8d3948
EG
3573 else if (CHIP_REV_IS_EMUL(bp))
3574 factor = 200;
3575 else
a2fbb9ea 3576 factor = 1;
a2fbb9ea
ET
3577
3578 DP(NETIF_MSG_HW, "start part1\n");
3579
3580 /* Disable inputs of parser neighbor blocks */
3581 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3582 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3583 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3584 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3585
3586 /* Write 0 to parser credits for CFC search request */
3587 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3588
3589 /* send Ethernet packet */
3590 bnx2x_lb_pckt(bp);
3591
 3592 /* TODO: do I reset the NIG statistic? */
3593 /* Wait until NIG register shows 1 packet of size 0x10 */
3594 count = 1000 * factor;
3595 while (count) {
34f80b04 3596
a2fbb9ea
ET
3597 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3598 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3599 if (val == 0x10)
3600 break;
3601
3602 msleep(10);
3603 count--;
3604 }
3605 if (val != 0x10) {
3606 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3607 return -1;
3608 }
3609
3610 /* Wait until PRS register shows 1 packet */
3611 count = 1000 * factor;
3612 while (count) {
3613 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
3614 if (val == 1)
3615 break;
3616
3617 msleep(10);
3618 count--;
3619 }
3620 if (val != 0x1) {
3621 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3622 return -2;
3623 }
3624
3625 /* Reset and init BRB, PRS */
34f80b04 3626 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 3627 msleep(50);
34f80b04 3628 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 3629 msleep(50);
94a78b79
VZ
3630 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3631 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
3632
3633 DP(NETIF_MSG_HW, "part2\n");
3634
3635 /* Disable inputs of parser neighbor blocks */
3636 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3637 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3638 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3639 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3640
3641 /* Write 0 to parser credits for CFC search request */
3642 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3643
3644 /* send 10 Ethernet packets */
3645 for (i = 0; i < 10; i++)
3646 bnx2x_lb_pckt(bp);
3647
3648 /* Wait until NIG register shows 10 + 1
3649 packets of size 11*0x10 = 0xb0 */
3650 count = 1000 * factor;
3651 while (count) {
34f80b04 3652
a2fbb9ea
ET
3653 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3654 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3655 if (val == 0xb0)
3656 break;
3657
3658 msleep(10);
3659 count--;
3660 }
3661 if (val != 0xb0) {
3662 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3663 return -3;
3664 }
3665
3666 /* Wait until PRS register shows 2 packets */
3667 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3668 if (val != 2)
3669 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3670
3671 /* Write 1 to parser credits for CFC search request */
3672 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3673
3674 /* Wait until PRS register shows 3 packets */
3675 msleep(10 * factor);
3676 /* Wait until NIG register shows 1 packet of size 0x10 */
3677 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3678 if (val != 3)
3679 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3680
3681 /* clear NIG EOP FIFO */
3682 for (i = 0; i < 11; i++)
3683 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3684 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3685 if (val != 1) {
3686 BNX2X_ERR("clear of NIG failed\n");
3687 return -4;
3688 }
3689
3690 /* Reset and init BRB, PRS, NIG */
3691 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3692 msleep(50);
3693 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3694 msleep(50);
94a78b79
VZ
3695 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3696 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 3697#ifndef BCM_CNIC
a2fbb9ea
ET
3698 /* set NIC mode */
3699 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3700#endif
3701
3702 /* Enable inputs of parser neighbor blocks */
3703 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3704 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3705 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 3706 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
3707
3708 DP(NETIF_MSG_HW, "done\n");
3709
3710 return 0; /* OK */
3711}
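The count/read/msleep loops above are the same poll-until-value shape as the reg_poll() helper that bnx2x_init_common() uses below for the CFC init checks; a sketch with an assumed signature (the driver's real reg_poll() may differ in detail):

static u32 reg_poll_sketch(struct bnx2x *bp, u32 reg, u32 expected,
			   int ms, int wait)
{
	u32 val;

	/* poll for at most `ms` milliseconds, sleeping `wait` ms per read;
	 * return the last value read so the caller can compare it */
	do {
		val = REG_RD(bp, reg);
		if (val == expected)
			break;
		ms -= wait;
		msleep(wait);
	} while (ms > 0);

	return val;
}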
3712
3713static void enable_blocks_attention(struct bnx2x *bp)
3714{
3715 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3716 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3717 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3718 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3719 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3720 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3721 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3722 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3723 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
3724/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3725/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3726 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3727 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3728 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
3729/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3730/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3731 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3732 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3733 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3734 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
3735/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3736/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3737 if (CHIP_REV_IS_FPGA(bp))
3738 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3739 else
3740 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
3741 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3742 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3743 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
3744/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3745/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3746 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3747 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
3748/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3749 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
3750}
3751
72fd0718
VZ
3752static const struct {
3753 u32 addr;
3754 u32 mask;
3755} bnx2x_parity_mask[] = {
3756 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3757 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3758 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3759 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3760 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3761 {QM_REG_QM_PRTY_MASK, 0x0},
3762 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3763 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3764 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3765 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3766 {CDU_REG_CDU_PRTY_MASK, 0x0},
3767 {CFC_REG_CFC_PRTY_MASK, 0x0},
3768 {DBG_REG_DBG_PRTY_MASK, 0x0},
3769 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3770 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3771 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3772 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3773 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3774 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3775 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3776 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3777 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3778 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3779 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3780 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3781 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3782 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3783 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3784};
3785
3786static void enable_blocks_parity(struct bnx2x *bp)
3787{
 3788 int i, mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
3790
3791 for (i = 0; i < mask_arr_len; i++)
3792 REG_WR(bp, bnx2x_parity_mask[i].addr,
3793 bnx2x_parity_mask[i].mask);
3794}
3795
34f80b04 3796
81f75bbf
EG
3797static void bnx2x_reset_common(struct bnx2x *bp)
3798{
3799 /* reset_common */
3800 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3801 0xd3ffff7f);
3802 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3803}
3804
573f2035
EG
3805static void bnx2x_init_pxp(struct bnx2x *bp)
3806{
3807 u16 devctl;
3808 int r_order, w_order;
3809
3810 pci_read_config_word(bp->pdev,
3811 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3812 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3813 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3814 if (bp->mrrs == -1)
3815 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3816 else {
3817 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3818 r_order = bp->mrrs;
3819 }
3820
3821 bnx2x_init_pxp_arb(bp, r_order, w_order);
3822}
fd4ef40d
EG
3823
3824static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3825{
2145a920 3826 int is_required;
fd4ef40d 3827 u32 val;
2145a920 3828 int port;
fd4ef40d 3829
2145a920
VZ
3830 if (BP_NOMCP(bp))
3831 return;
3832
3833 is_required = 0;
fd4ef40d
EG
3834 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3835 SHARED_HW_CFG_FAN_FAILURE_MASK;
3836
3837 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3838 is_required = 1;
3839
3840 /*
3841 * The fan failure mechanism is usually related to the PHY type since
3842 * the power consumption of the board is affected by the PHY. Currently,
 3843 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3844 */
3845 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3846 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 3847 is_required |=
d90d96ba
YR
3848 bnx2x_fan_failure_det_req(
3849 bp,
3850 bp->common.shmem_base,
3851 port);
fd4ef40d
EG
3852 }
3853
3854 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3855
3856 if (is_required == 0)
3857 return;
3858
3859 /* Fan failure is indicated by SPIO 5 */
3860 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3861 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3862
3863 /* set to active low mode */
3864 val = REG_RD(bp, MISC_REG_SPIO_INT);
3865 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 3866 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
3867 REG_WR(bp, MISC_REG_SPIO_INT, val);
3868
3869 /* enable interrupt to signal the IGU */
3870 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3871 val |= (1 << MISC_REGISTERS_SPIO_5);
3872 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3873}
3874
34f80b04 3875static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 3876{
a2fbb9ea 3877 u32 val, i;
37b091ba
MC
3878#ifdef BCM_CNIC
3879 u32 wb_write[2];
3880#endif
a2fbb9ea 3881
34f80b04 3882 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 3883
81f75bbf 3884 bnx2x_reset_common(bp);
34f80b04
EG
3885 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3886 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 3887
94a78b79 3888 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
3889 if (CHIP_IS_E1H(bp))
3890 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 3891
34f80b04
EG
3892 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3893 msleep(30);
3894 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 3895
94a78b79 3896 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
3897 if (CHIP_IS_E1(bp)) {
3898 /* enable HW interrupt from PXP on USDM overflow
3899 bit 16 on INT_MASK_0 */
3900 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3901 }
a2fbb9ea 3902
94a78b79 3903 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 3904 bnx2x_init_pxp(bp);
a2fbb9ea
ET
3905
3906#ifdef __BIG_ENDIAN
34f80b04
EG
3907 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3908 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3909 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3910 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3911 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
3912 /* make sure this value is 0 */
3913 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
3914
3915/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3916 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3917 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3918 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3919 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
3920#endif
3921
34f80b04 3922 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 3923#ifdef BCM_CNIC
34f80b04
EG
3924 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3925 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3926 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
3927#endif
3928
34f80b04
EG
3929 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3930 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 3931
34f80b04
EG
 3932 /* let the HW do its magic ... */
3933 msleep(100);
3934 /* finish PXP init */
3935 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3936 if (val != 1) {
3937 BNX2X_ERR("PXP2 CFG failed\n");
3938 return -EBUSY;
3939 }
3940 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3941 if (val != 1) {
3942 BNX2X_ERR("PXP2 RD_INIT failed\n");
3943 return -EBUSY;
3944 }
a2fbb9ea 3945
34f80b04
EG
3946 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3947 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 3948
94a78b79 3949 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 3950
34f80b04
EG
3951 /* clean the DMAE memory */
3952 bp->dmae_ready = 1;
3953 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 3954
94a78b79
VZ
3955 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3956 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3957 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3958 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 3959
34f80b04
EG
3960 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3961 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3962 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3963 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3964
94a78b79 3965 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
3966
3967#ifdef BCM_CNIC
3968 wb_write[0] = 0;
3969 wb_write[1] = 0;
3970 for (i = 0; i < 64; i++) {
3971 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3972 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3973
3974 if (CHIP_IS_E1H(bp)) {
3975 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
3976 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
3977 wb_write, 2);
3978 }
3979 }
3980#endif
34f80b04
EG
3981 /* soft reset pulse */
3982 REG_WR(bp, QM_REG_SOFT_RESET, 1);
3983 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 3984
37b091ba 3985#ifdef BCM_CNIC
94a78b79 3986 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 3987#endif
a2fbb9ea 3988
94a78b79 3989 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
3990 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
3991 if (!CHIP_REV_IS_SLOW(bp)) {
3992 /* enable hw interrupt from doorbell Q */
3993 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3994 }
a2fbb9ea 3995
94a78b79
VZ
3996 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3997 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 3998 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 3999#ifndef BCM_CNIC
3196a88a
EG
4000 /* set NIC mode */
4001 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 4002#endif
34f80b04
EG
4003 if (CHIP_IS_E1H(bp))
4004 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 4005
94a78b79
VZ
4006 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4007 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4008 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4009 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 4010
ca00392c
EG
4011 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4012 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4013 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4014 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 4015
94a78b79
VZ
4016 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4017 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4018 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4019 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 4020
34f80b04
EG
4021 /* sync semi rtc */
4022 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4023 0x80000000);
4024 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4025 0x80000000);
a2fbb9ea 4026
94a78b79
VZ
4027 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4028 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4029 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 4030
34f80b04 4031 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
4032 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4033 REG_WR(bp, i, random32());
94a78b79 4034 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
4035#ifdef BCM_CNIC
4036 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4037 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4038 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4039 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4040 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4041 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4042 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4043 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4044 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4045 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4046#endif
34f80b04 4047 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 4048
34f80b04
EG
4049 if (sizeof(union cdu_context) != 1024)
4050 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
4051 dev_alert(&bp->pdev->dev, "please adjust the size "
4052 "of cdu_context(%ld)\n",
7995c64e 4053 (long)sizeof(union cdu_context));
a2fbb9ea 4054
94a78b79 4055 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
4056 val = (4 << 24) + (0 << 12) + 1024;
4057 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 4058
94a78b79 4059 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 4060 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
4061 /* enable context validation interrupt from CFC */
4062 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4063
4064 /* set the thresholds to prevent CFC/CDU race */
4065 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 4066
94a78b79
VZ
4067 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4068 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 4069
94a78b79 4070 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
4071 /* Reset PCIE errors for debug */
4072 REG_WR(bp, 0x2814, 0xffffffff);
4073 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 4074
94a78b79 4075 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 4076 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 4077 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 4078 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 4079
94a78b79 4080 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
4081 if (CHIP_IS_E1H(bp)) {
4082 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4083 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4084 }
4085
4086 if (CHIP_REV_IS_SLOW(bp))
4087 msleep(200);
4088
4089 /* finish CFC init */
4090 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4091 if (val != 1) {
4092 BNX2X_ERR("CFC LL_INIT failed\n");
4093 return -EBUSY;
4094 }
4095 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4096 if (val != 1) {
4097 BNX2X_ERR("CFC AC_INIT failed\n");
4098 return -EBUSY;
4099 }
4100 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4101 if (val != 1) {
4102 BNX2X_ERR("CFC CAM_INIT failed\n");
4103 return -EBUSY;
4104 }
4105 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 4106
34f80b04
EG
4107 /* read NIG statistic
4108 to see if this is our first up since powerup */
4109 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4110 val = *bnx2x_sp(bp, wb_data[0]);
4111
4112 /* do internal memory self test */
4113 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4114 BNX2X_ERR("internal mem self test failed\n");
4115 return -EBUSY;
4116 }
4117
d90d96ba
YR
4118 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4119 bp->common.shmem_base);
f1410647 4120
fd4ef40d
EG
4121 bnx2x_setup_fan_failure_detection(bp);
4122
34f80b04
EG
4123 /* clear PXP2 attentions */
4124 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 4125
34f80b04 4126 enable_blocks_attention(bp);
72fd0718
VZ
4127 if (CHIP_PARITY_SUPPORTED(bp))
4128 enable_blocks_parity(bp);
a2fbb9ea 4129
6bbca910
YR
4130 if (!BP_NOMCP(bp)) {
4131 bnx2x_acquire_phy_lock(bp);
4132 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4133 bnx2x_release_phy_lock(bp);
4134 } else
4135 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4136
34f80b04
EG
4137 return 0;
4138}
a2fbb9ea 4139
34f80b04
EG
4140static int bnx2x_init_port(struct bnx2x *bp)
4141{
4142 int port = BP_PORT(bp);
94a78b79 4143 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 4144 u32 low, high;
34f80b04 4145 u32 val;
a2fbb9ea 4146
cdaa7cb8 4147 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
4148
4149 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 4150
94a78b79 4151 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 4152 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
4153
4154 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4155 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4156 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 4157 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 4158
37b091ba
MC
4159#ifdef BCM_CNIC
4160 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 4161
94a78b79 4162 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
4163 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4164 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 4165#endif
cdaa7cb8 4166
94a78b79 4167 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 4168
94a78b79 4169 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
4170 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4171 /* no pause for emulation and FPGA */
4172 low = 0;
4173 high = 513;
4174 } else {
4175 if (IS_E1HMF(bp))
4176 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4177 else if (bp->dev->mtu > 4096) {
4178 if (bp->flags & ONE_PORT_FLAG)
4179 low = 160;
4180 else {
4181 val = bp->dev->mtu;
4182 /* (24*1024 + val*4)/256 */
4183 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4184 }
4185 } else
4186 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4187 high = low + 56; /* 14*1024/256 */
4188 }
4189 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4190 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4191
4192
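 /* Worked example (editor's sketch): for an assumed 9000-byte MTU,
  * low = 96 + 9000/64 + 1 = 96 + 140 + 1 = 237 blocks, which matches
  * the commented formula, (24*1024 + 9000*4)/256 = 236.6 rounded up;
  * high is then 237 + 56 = 293 blocks.
  */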
94a78b79 4193 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 4194
94a78b79 4195 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 4196 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 4197 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 4198 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 4199
94a78b79
VZ
4200 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4201 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4202 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4203 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 4204
94a78b79 4205 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 4206 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 4207
94a78b79 4208 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
4209
4210 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 4211 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
4212
4213 /* update threshold */
34f80b04 4214 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 4215 /* update init credit */
34f80b04 4216 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
4217
4218 /* probe changes */
34f80b04 4219 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 4220 msleep(5);
34f80b04 4221 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 4222
37b091ba
MC
4223#ifdef BCM_CNIC
4224 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 4225#endif
94a78b79 4226 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 4227 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
4228
4229 if (CHIP_IS_E1(bp)) {
4230 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4231 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4232 }
94a78b79 4233 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 4234
94a78b79 4235 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
4236 /* init aeu_mask_attn_func_0/1:
4237 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4238 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4239 * bits 4-7 are used for "per vn group attention" */
4240 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4241 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4242
94a78b79 4243 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 4244 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 4245 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 4246 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 4247 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 4248
94a78b79 4249 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
4250
4251 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4252
4253 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
4254 /* 0x2 disable e1hov, 0x1 enable */
4255 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4256 (IS_E1HMF(bp) ? 0x1 : 0x2));
4257
1c06328c
EG
4258 {
4259 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4260 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4261 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4262 }
34f80b04
EG
4263 }
4264
94a78b79 4265 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 4266 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba
YR
4267 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4268 bp->common.shmem_base);
a2fbb9ea 4269
d90d96ba
YR
4270 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
4271 port)) {
4d295db0
EG
4272 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4273 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4274 val = REG_RD(bp, reg_addr);
f1410647 4275 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 4276 REG_WR(bp, reg_addr, val);
f1410647 4277 }
c18487ee 4278 bnx2x__link_reset(bp);
a2fbb9ea 4279
34f80b04
EG
4280 return 0;
4281}
4282
4283#define ILT_PER_FUNC (768/2)
4284#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4285/* the phys address is shifted right 12 bits and a 1=valid bit
4286 is set at the 53rd bit;
4287 then, since this is a wide register(TM),
4288 we split it into two 32-bit writes
4289 */
4290#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4291#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4292#define PXP_ONE_ILT(x) (((x) << 10) | x)
4293#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4294
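As a concrete check of the split (editor's sketch; the address is an arbitrary example): for a DMA address of 0x0000001234567000, ONCHIP_ADDR1 keeps bits 12..43 and ONCHIP_ADDR2 keeps the remaining high bits plus the valid flag at bit 20 of the high word -- bit 52 of the combined entry, i.e. the "53rd bit" counting from 1:

#include <stdio.h>
#include <stdint.h>

#define ONCHIP_ADDR1(x) ((uint32_t)(((uint64_t)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x) ((uint32_t)((1 << 20) | ((uint64_t)(x) >> 44)))

int main(void)
{
	uint64_t addr = 0x0000001234567000ULL;	/* arbitrary example */

	/* prints ADDR1 = 0x01234567, ADDR2 = 0x00100000 (valid bit only) */
	printf("ADDR1 = 0x%08x, ADDR2 = 0x%08x\n",
	       ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
	return 0;
}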
37b091ba
MC
4295#ifdef BCM_CNIC
4296#define CNIC_ILT_LINES 127
4297#define CNIC_CTX_PER_ILT 16
4298#else
34f80b04 4299#define CNIC_ILT_LINES 0
37b091ba 4300#endif
34f80b04
EG
4301
4302static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4303{
4304 int reg;
4305
4306 if (CHIP_IS_E1H(bp))
4307 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4308 else /* E1 */
4309 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4310
4311 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4312}
4313
4314static int bnx2x_init_func(struct bnx2x *bp)
4315{
4316 int port = BP_PORT(bp);
4317 int func = BP_FUNC(bp);
8badd27a 4318 u32 addr, val;
34f80b04
EG
4319 int i;
4320
cdaa7cb8 4321 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 4322
8badd27a
EG
4323 /* set MSI reconfigure capability */
4324 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4325 val = REG_RD(bp, addr);
4326 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4327 REG_WR(bp, addr, val);
4328
34f80b04
EG
4329 i = FUNC_ILT_BASE(func);
4330
4331 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4332 if (CHIP_IS_E1H(bp)) {
4333 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4334 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4335 } else /* E1 */
4336 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4337 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4338
37b091ba
MC
4339#ifdef BCM_CNIC
4340 i += 1 + CNIC_ILT_LINES;
4341 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4342 if (CHIP_IS_E1(bp))
4343 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4344 else {
4345 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4346 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4347 }
4348
4349 i++;
4350 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4351 if (CHIP_IS_E1(bp))
4352 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4353 else {
4354 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4355 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4356 }
4357
4358 i++;
4359 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4360 if (CHIP_IS_E1(bp))
4361 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4362 else {
4363 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4364 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4365 }
4366
4367 /* tell the searcher where the T2 table is */
4368 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4369
4370 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4371 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4372
4373 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4374 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4375 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4376
4377 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4378#endif
34f80b04
EG
4379
4380 if (CHIP_IS_E1H(bp)) {
573f2035
EG
4381 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4382 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4383 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4384 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4385 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4386 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4387 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4388 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4389 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
4390
4391 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4392 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4393 }
4394
4395 /* HC init per function */
4396 if (CHIP_IS_E1H(bp)) {
4397 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4398
4399 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4400 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4401 }
94a78b79 4402 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 4403
c14423fe 4404 /* Reset PCIE errors for debug */
a2fbb9ea
ET
4405 REG_WR(bp, 0x2114, 0xffffffff);
4406 REG_WR(bp, 0x2120, 0xffffffff);
b7737c9b 4407 bnx2x_phy_probe(&bp->link_params);
34f80b04
EG
4408 return 0;
4409}
4410
9f6c9258 4411int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04
EG
4412{
4413 int i, rc = 0;
a2fbb9ea 4414
34f80b04
EG
4415 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4416 BP_FUNC(bp), load_code);
a2fbb9ea 4417
34f80b04
EG
4418 bp->dmae_ready = 0;
4419 mutex_init(&bp->dmae_mutex);
54016b26
EG
4420 rc = bnx2x_gunzip_init(bp);
4421 if (rc)
4422 return rc;
a2fbb9ea 4423
34f80b04
EG
4424 switch (load_code) {
4425 case FW_MSG_CODE_DRV_LOAD_COMMON:
4426 rc = bnx2x_init_common(bp);
4427 if (rc)
4428 goto init_hw_err;
4429 /* no break */
4430
4431 case FW_MSG_CODE_DRV_LOAD_PORT:
4432 bp->dmae_ready = 1;
4433 rc = bnx2x_init_port(bp);
4434 if (rc)
4435 goto init_hw_err;
4436 /* no break */
4437
4438 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4439 bp->dmae_ready = 1;
4440 rc = bnx2x_init_func(bp);
4441 if (rc)
4442 goto init_hw_err;
4443 break;
4444
4445 default:
4446 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4447 break;
4448 }
4449
4450 if (!BP_NOMCP(bp)) {
4451 int func = BP_FUNC(bp);
a2fbb9ea
ET
4452
4453 bp->fw_drv_pulse_wr_seq =
34f80b04 4454 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 4455 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
4456 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4457 }
a2fbb9ea 4458
34f80b04
EG
4459 /* this needs to be done before gunzip end */
4460 bnx2x_zero_def_sb(bp);
4461 for_each_queue(bp, i)
4462 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
4463#ifdef BCM_CNIC
4464 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4465#endif
34f80b04
EG
4466
4467init_hw_err:
4468 bnx2x_gunzip_end(bp);
4469
4470 return rc;
a2fbb9ea
ET
4471}
4472
9f6c9258 4473void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
4474{
4475
4476#define BNX2X_PCI_FREE(x, y, size) \
4477 do { \
4478 if (x) { \
1a983142 4479 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
4480 x = NULL; \
4481 y = 0; \
4482 } \
4483 } while (0)
4484
4485#define BNX2X_FREE(x) \
4486 do { \
4487 if (x) { \
4488 vfree(x); \
4489 x = NULL; \
4490 } \
4491 } while (0)
4492
4493 int i;
4494
4495 /* fastpath */
555f6c78 4496 /* Common */
a2fbb9ea
ET
4497 for_each_queue(bp, i) {
4498
555f6c78 4499 /* status blocks */
a2fbb9ea
ET
4500 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4501 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 4502 sizeof(struct host_status_block));
555f6c78
EG
4503 }
4504 /* Rx */
54b9ddaa 4505 for_each_queue(bp, i) {
a2fbb9ea 4506
555f6c78 4507 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
4508 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4509 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4510 bnx2x_fp(bp, i, rx_desc_mapping),
4511 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4512
4513 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4514 bnx2x_fp(bp, i, rx_comp_mapping),
4515 sizeof(struct eth_fast_path_rx_cqe) *
4516 NUM_RCQ_BD);
a2fbb9ea 4517
7a9b2557 4518 /* SGE ring */
32626230 4519 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
4520 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4521 bnx2x_fp(bp, i, rx_sge_mapping),
4522 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4523 }
555f6c78 4524 /* Tx */
54b9ddaa 4525 for_each_queue(bp, i) {
555f6c78
EG
4526
4527 /* fastpath tx rings: tx_buf tx_desc */
4528 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4529 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4530 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 4531 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 4532 }
a2fbb9ea
ET
4533 /* end of fastpath */
4534
4535 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 4536 sizeof(struct host_def_status_block));
a2fbb9ea
ET
4537
4538 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 4539 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4540
37b091ba 4541#ifdef BCM_CNIC
a2fbb9ea
ET
4542 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4543 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4544 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4545 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
4546 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4547 sizeof(struct host_status_block));
a2fbb9ea 4548#endif
7a9b2557 4549 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
4550
4551#undef BNX2X_PCI_FREE
 4552#undef BNX2X_FREE
4553}
4554
9f6c9258 4555int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea
ET
4556{
4557
4558#define BNX2X_PCI_ALLOC(x, y, size) \
4559 do { \
1a983142 4560 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
4561 if (x == NULL) \
4562 goto alloc_mem_err; \
4563 memset(x, 0, size); \
4564 } while (0)
a2fbb9ea 4565
9f6c9258
DK
4566#define BNX2X_ALLOC(x, size) \
4567 do { \
4568 x = vmalloc(size); \
4569 if (x == NULL) \
4570 goto alloc_mem_err; \
4571 memset(x, 0, size); \
4572 } while (0)
a2fbb9ea 4573
9f6c9258 4574 int i;
a2fbb9ea 4575
9f6c9258
DK
4576 /* fastpath */
4577 /* Common */
a2fbb9ea 4578 for_each_queue(bp, i) {
9f6c9258 4579 bnx2x_fp(bp, i, bp) = bp;
a2fbb9ea 4580
9f6c9258
DK
4581 /* status blocks */
4582 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4583 &bnx2x_fp(bp, i, status_blk_mapping),
4584 sizeof(struct host_status_block));
a2fbb9ea 4585 }
9f6c9258
DK
4586 /* Rx */
4587 for_each_queue(bp, i) {
a2fbb9ea 4588
9f6c9258
DK
4589 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4590 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4591 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4592 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4593 &bnx2x_fp(bp, i, rx_desc_mapping),
4594 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 4595
9f6c9258
DK
4596 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4597 &bnx2x_fp(bp, i, rx_comp_mapping),
4598 sizeof(struct eth_fast_path_rx_cqe) *
4599 NUM_RCQ_BD);
a2fbb9ea 4600
9f6c9258
DK
4601 /* SGE ring */
4602 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4603 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4604 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4605 &bnx2x_fp(bp, i, rx_sge_mapping),
4606 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4607 }
4608 /* Tx */
4609 for_each_queue(bp, i) {
8badd27a 4610
9f6c9258
DK
4611 /* fastpath tx rings: tx_buf tx_desc */
4612 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4613 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4614 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4615 &bnx2x_fp(bp, i, tx_desc_mapping),
4616 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 4617 }
9f6c9258 4618 /* end of fastpath */
8badd27a 4619
9f6c9258
DK
4620 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4621 sizeof(struct host_def_status_block));
8badd27a 4622
9f6c9258
DK
4623 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4624 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4625
9f6c9258
DK
4626#ifdef BCM_CNIC
4627 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
8badd27a 4628
9f6c9258
DK
4629 /* allocate searcher T2 table
4630 we allocate 1/4 of alloc num for T2
4631 (which is not entered into the ILT) */
4632 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
a2fbb9ea 4633
9f6c9258
DK
4634 /* Initialize T2 (for 1024 connections) */
4635 for (i = 0; i < 16*1024; i += 64)
4636 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 4637
9f6c9258
DK
4638 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4639 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
65abd74d 4640
9f6c9258
DK
4641 /* QM queues (128*MAX_CONN) */
4642 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
65abd74d 4643
9f6c9258
DK
4644 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4645 sizeof(struct host_status_block));
4646#endif
65abd74d 4647
9f6c9258
DK
4648 /* Slow path ring */
4649 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 4650
9f6c9258 4651 return 0;
e1510706 4652
9f6c9258
DK
4653alloc_mem_err:
4654 bnx2x_free_mem(bp);
4655 return -ENOMEM;
e1510706 4656
9f6c9258
DK
4657#undef BNX2X_PCI_ALLOC
4658#undef BNX2X_ALLOC
65abd74d
YG
4659}
4660
65abd74d 4661
a2fbb9ea
ET
4662/*
4663 * Init service functions
4664 */
4665
e665bfda
MC
4666/**
4667 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4668 *
4669 * @param bp driver descriptor
4670 * @param set set or clear an entry (1 or 0)
4671 * @param mac pointer to a buffer containing a MAC
4672 * @param cl_bit_vec bit vector of clients to register a MAC for
4673 * @param cam_offset offset in a CAM to use
4674 * @param with_bcast set broadcast MAC as well
4675 */
4676static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4677 u32 cl_bit_vec, u8 cam_offset,
4678 u8 with_bcast)
a2fbb9ea
ET
4679{
4680 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 4681 int port = BP_PORT(bp);
a2fbb9ea
ET
4682
4683 /* CAM allocation
4684 * unicasts 0-31:port0 32-63:port1
4685 * multicast 64-127:port0 128-191:port1
4686 */
e665bfda
MC
4687 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4688 config->hdr.offset = cam_offset;
4689 config->hdr.client_id = 0xff;
a2fbb9ea
ET
4690 config->hdr.reserved1 = 0;
4691
4692 /* primary MAC */
4693 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 4694 swab16(*(u16 *)&mac[0]);
a2fbb9ea 4695 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 4696 swab16(*(u16 *)&mac[2]);
a2fbb9ea 4697 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 4698 swab16(*(u16 *)&mac[4]);
34f80b04 4699 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
4700 if (set)
4701 config->config_table[0].target_table_entry.flags = 0;
4702 else
4703 CAM_INVALIDATE(config->config_table[0]);
ca00392c 4704 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 4705 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
4706 config->config_table[0].target_table_entry.vlan_id = 0;
4707
3101c2bc
YG
4708 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4709 (set ? "setting" : "clearing"),
a2fbb9ea
ET
4710 config->config_table[0].cam_entry.msb_mac_addr,
4711 config->config_table[0].cam_entry.middle_mac_addr,
4712 config->config_table[0].cam_entry.lsb_mac_addr);
4713
4714 /* broadcast */
e665bfda
MC
4715 if (with_bcast) {
4716 config->config_table[1].cam_entry.msb_mac_addr =
4717 cpu_to_le16(0xffff);
4718 config->config_table[1].cam_entry.middle_mac_addr =
4719 cpu_to_le16(0xffff);
4720 config->config_table[1].cam_entry.lsb_mac_addr =
4721 cpu_to_le16(0xffff);
4722 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4723 if (set)
4724 config->config_table[1].target_table_entry.flags =
4725 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4726 else
4727 CAM_INVALIDATE(config->config_table[1]);
4728 config->config_table[1].target_table_entry.clients_bit_vector =
4729 cpu_to_le32(cl_bit_vec);
4730 config->config_table[1].target_table_entry.vlan_id = 0;
4731 }
a2fbb9ea
ET
4732
4733 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4734 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4735 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4736}
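The swab16(*(u16 *)&mac[...]) packing above does a 16-bit read and then byte-swaps it, so each CAM field ends up holding two MAC bytes in wire order. For 00:11:22:33:44:55 that gives msb = 0x0011, middle = 0x2233, lsb = 0x4455; a standalone check (editor's sketch, assumes a little-endian host):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t swab16(uint16_t v) { return (uint16_t)((v >> 8) | (v << 8)); }

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t half;
	int i;

	for (i = 0; i < 6; i += 2) {
		memcpy(&half, &mac[i], 2);  /* LE read: mac[i+1]<<8 | mac[i] */
		printf("field %d = 0x%04x\n", i / 2, swab16(half));
	}
	return 0;	/* prints 0x0011, 0x2233, 0x4455 */
}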
4737
e665bfda
MC
4738/**
4739 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4740 *
4741 * @param bp driver descriptor
4742 * @param set set or clear an entry (1 or 0)
4743 * @param mac pointer to a buffer containing a MAC
4744 * @param cl_bit_vec bit vector of clients to register a MAC for
4745 * @param cam_offset offset in a CAM to use
4746 */
4747static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4748 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
4749{
4750 struct mac_configuration_cmd_e1h *config =
4751 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4752
8d9c5f34 4753 config->hdr.length = 1;
e665bfda
MC
4754 config->hdr.offset = cam_offset;
4755 config->hdr.client_id = 0xff;
34f80b04
EG
4756 config->hdr.reserved1 = 0;
4757
4758 /* primary MAC */
4759 config->config_table[0].msb_mac_addr =
e665bfda 4760 swab16(*(u16 *)&mac[0]);
34f80b04 4761 config->config_table[0].middle_mac_addr =
e665bfda 4762 swab16(*(u16 *)&mac[2]);
34f80b04 4763 config->config_table[0].lsb_mac_addr =
e665bfda 4764 swab16(*(u16 *)&mac[4]);
ca00392c 4765 config->config_table[0].clients_bit_vector =
e665bfda 4766 cpu_to_le32(cl_bit_vec);
34f80b04
EG
4767 config->config_table[0].vlan_id = 0;
4768 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
4769 if (set)
4770 config->config_table[0].flags = BP_PORT(bp);
4771 else
4772 config->config_table[0].flags =
4773 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 4774
e665bfda 4775 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 4776 (set ? "setting" : "clearing"),
34f80b04
EG
4777 config->config_table[0].msb_mac_addr,
4778 config->config_table[0].middle_mac_addr,
e665bfda 4779 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
4780
4781 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4782 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4783 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4784}
4785
a2fbb9ea
ET
4786static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4787 int *state_p, int poll)
4788{
4789 /* can take a while if any port is running */
8b3a0f0b 4790 int cnt = 5000;
a2fbb9ea 4791
c14423fe
ET
4792 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4793 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
4794
4795 might_sleep();
34f80b04 4796 while (cnt--) {
a2fbb9ea
ET
4797 if (poll) {
4798 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
4799 /* if index is different from 0
4800 * the reply for some commands will
3101c2bc 4801 * be on the non-default queue
a2fbb9ea
ET
4802 */
4803 if (idx)
4804 bnx2x_rx_int(&bp->fp[idx], 10);
4805 }
a2fbb9ea 4806
3101c2bc 4807 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
4808 if (*state_p == state) {
4809#ifdef BNX2X_STOP_ON_ERROR
4810 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4811#endif
a2fbb9ea 4812 return 0;
8b3a0f0b 4813 }
a2fbb9ea 4814
a2fbb9ea 4815 msleep(1);
e3553b29
EG
4816
4817 if (bp->panic)
4818 return -EIO;
a2fbb9ea
ET
4819 }
4820
a2fbb9ea 4821 /* timeout! */
49d66772
ET
4822 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4823 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
4824#ifdef BNX2X_STOP_ON_ERROR
4825 bnx2x_panic();
4826#endif
a2fbb9ea 4827
49d66772 4828 return -EBUSY;
a2fbb9ea
ET
4829}
4830
9f6c9258 4831void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
e665bfda
MC
4832{
4833 bp->set_mac_pending++;
4834 smp_wmb();
4835
4836 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4837 (1 << bp->fp->cl_id), BP_FUNC(bp));
4838
4839 /* Wait for a completion */
4840 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4841}
4842
9f6c9258 4843void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
e665bfda
MC
4844{
4845 bp->set_mac_pending++;
4846 smp_wmb();
4847
4848 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4849 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4850 1);
4851
4852 /* Wait for a completion */
4853 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4854}
4855
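Both wrappers above use the same handshake: bump set_mac_pending, publish it with smp_wmb(), post the SET_MAC ramrod, then poll via bnx2x_wait_ramrod() until the counter drops back to zero. In outline (editor's sketch; the completion side is assumed to live in the slowpath event handler, which decrements the counter when the ramrod completes):

	bp->set_mac_pending++;		/* mark a ramrod in flight */
	smp_wmb();			/* publish before posting the ramrod */
	bnx2x_set_mac_addr_e1h_gen(bp, set, mac, cl_bit_vec, cam_offset);
	/* wait for *state_p (&bp->set_mac_pending) to return to 0 */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);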
993ac7b5
MC
4856#ifdef BCM_CNIC
4857/**
 4858 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 4859 * MAC(s). This function will wait until the ramrod completion
 4860 * returns.
4861 *
4862 * @param bp driver handle
4863 * @param set set or clear the CAM entry
4864 *
 4865 * @return 0 on success, -ENODEV if the ramrod doesn't complete.
4866 */
9f6c9258 4867int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5
MC
4868{
4869 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4870
4871 bp->set_mac_pending++;
4872 smp_wmb();
4873
4874 /* Send a SET_MAC ramrod */
4875 if (CHIP_IS_E1(bp))
4876 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4877 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4878 1);
4879 else
4880 /* CAM allocation for E1H
4881 * unicasts: by func number
4882 * multicast: 20+FUNC*20, 20 each
4883 */
4884 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4885 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4886
4887 /* Wait for a completion when setting */
4888 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4889
4890 return 0;
4891}
4892#endif
4893
9f6c9258 4894int bnx2x_setup_leading(struct bnx2x *bp)
a2fbb9ea 4895{
34f80b04 4896 int rc;
a2fbb9ea 4897
c14423fe 4898 /* reset IGU state */
34f80b04 4899 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4900
4901 /* SETUP ramrod */
4902 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4903
34f80b04
EG
4904 /* Wait for completion */
4905 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 4906
34f80b04 4907 return rc;
a2fbb9ea
ET
4908}
4909
9f6c9258 4910int bnx2x_setup_multi(struct bnx2x *bp, int index)
a2fbb9ea 4911{
555f6c78
EG
4912 struct bnx2x_fastpath *fp = &bp->fp[index];
4913
a2fbb9ea 4914 /* reset IGU state */
555f6c78 4915 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 4916
228241eb 4917 /* SETUP ramrod */
555f6c78
EG
4918 fp->state = BNX2X_FP_STATE_OPENING;
4919 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4920 fp->cl_id, 0);
a2fbb9ea
ET
4921
4922 /* Wait for completion */
4923 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 4924 &(fp->state), 0);
a2fbb9ea
ET
4925}
4926
a2fbb9ea 4927
9f6c9258 4928void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 4929{
ca00392c
EG
4930
4931 switch (bp->multi_mode) {
4932 case ETH_RSS_MODE_DISABLED:
54b9ddaa 4933 bp->num_queues = 1;
ca00392c
EG
4934 break;
4935
4936 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
4937 if (num_queues)
4938 bp->num_queues = min_t(u32, num_queues,
4939 BNX2X_MAX_QUEUES(bp));
ca00392c 4940 else
54b9ddaa
VZ
4941 bp->num_queues = min_t(u32, num_online_cpus(),
4942 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
4943 break;
4944
4945
4946 default:
54b9ddaa 4947 bp->num_queues = 1;
9f6c9258
DK
4948 break;
4949 }
a2fbb9ea
ET
4950}
4951
9f6c9258
DK
4952
4953
a2fbb9ea
ET
4954static int bnx2x_stop_multi(struct bnx2x *bp, int index)
4955{
555f6c78 4956 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
4957 int rc;
4958
c14423fe 4959 /* halt the connection */
555f6c78
EG
4960 fp->state = BNX2X_FP_STATE_HALTING;
4961 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 4962
34f80b04 4963 /* Wait for completion */
a2fbb9ea 4964 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 4965 &(fp->state), 1);
c14423fe 4966 if (rc) /* timeout */
a2fbb9ea
ET
4967 return rc;
4968
4969 /* delete cfc entry */
4970 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
4971
34f80b04
EG
4972 /* Wait for completion */
4973 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 4974 &(fp->state), 1);
34f80b04 4975 return rc;
a2fbb9ea
ET
4976}
4977
da5a662a 4978static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 4979{
4781bfad 4980 __le16 dsb_sp_prod_idx;
c14423fe 4981 /* if the other port is handling traffic,
a2fbb9ea 4982 this can take a lot of time */
34f80b04
EG
4983 int cnt = 500;
4984 int rc;
a2fbb9ea
ET
4985
4986 might_sleep();
4987
4988 /* Send HALT ramrod */
4989 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 4990 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 4991
34f80b04
EG
4992 /* Wait for completion */
4993 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
4994 &(bp->fp[0].state), 1);
4995 if (rc) /* timeout */
da5a662a 4996 return rc;
a2fbb9ea 4997
49d66772 4998 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 4999
228241eb 5000 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
5001 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5002
49d66772 5003 /* Wait for completion to arrive on the default status block;
a2fbb9ea
ET
 5004 we are going to reset the chip anyway,
 5005 so there is not much to do if this times out
 5006 */
34f80b04 5007 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
5008 if (!cnt) {
5009 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5010 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5011 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5012#ifdef BNX2X_STOP_ON_ERROR
5013 bnx2x_panic();
5014#endif
36e552ab 5015 rc = -EBUSY;
34f80b04
EG
5016 break;
5017 }
5018 cnt--;
da5a662a 5019 msleep(1);
5650d9d4 5020 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
5021 }
5022 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5023 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
5024
5025 return rc;
a2fbb9ea
ET
5026}
5027
34f80b04
EG
5028static void bnx2x_reset_func(struct bnx2x *bp)
5029{
5030 int port = BP_PORT(bp);
5031 int func = BP_FUNC(bp);
5032 int base, i;
5033
5034 /* Configure IGU */
5035 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5036 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5037
37b091ba
MC
5038#ifdef BCM_CNIC
5039 /* Disable Timer scan */
5040 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5041 /*
5042 * Wait for at least 10ms and up to 2 seconds for the timers scan to
5043 * complete
5044 */
5045 for (i = 0; i < 200; i++) {
5046 msleep(10);
5047 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5048 break;
5049 }
5050#endif
34f80b04
EG
5051 /* Clear ILT */
5052 base = FUNC_ILT_BASE(func);
5053 for (i = base; i < base + ILT_PER_FUNC; i++)
5054 bnx2x_ilt_wr(bp, i, 0);
5055}
5056
5057static void bnx2x_reset_port(struct bnx2x *bp)
5058{
5059 int port = BP_PORT(bp);
5060 u32 val;
5061
5062 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5063
5064 /* Do not rcv packets to BRB */
5065 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5066 /* Do not direct rcv packets that are not for MCP to the BRB */
5067 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5068 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5069
5070 /* Configure AEU */
5071 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5072
5073 msleep(100);
5074 /* Check for BRB port occupancy */
5075 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5076 if (val)
5077 DP(NETIF_MSG_IFDOWN,
33471629 5078 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
5079
5080 /* TODO: Close Doorbell port? */
5081}
5082
34f80b04
EG
5083static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5084{
5085 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5086 BP_FUNC(bp), reset_code);
5087
5088 switch (reset_code) {
5089 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5090 bnx2x_reset_port(bp);
5091 bnx2x_reset_func(bp);
5092 bnx2x_reset_common(bp);
5093 break;
5094
5095 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5096 bnx2x_reset_port(bp);
5097 bnx2x_reset_func(bp);
5098 break;
5099
5100 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5101 bnx2x_reset_func(bp);
5102 break;
49d66772 5103
34f80b04
EG
5104 default:
5105 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5106 break;
5107 }
5108}
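
/*
 * The MCP reply encodes teardown scope as strictly nested sets: COMMON
 * covers port + function + shared blocks, PORT covers port + function,
 * FUNCTION only the function, always in that order.  The switch above
 * is therefore equivalent to this sketch (the scope encoding here is
 * hypothetical: 2 = common, 1 = port, 0 = function):
 */
static void example_reset_by_scope(struct bnx2x *bp, int scope)
{
	if (scope >= 1)
		bnx2x_reset_port(bp);
	bnx2x_reset_func(bp);
	if (scope == 2)
		bnx2x_reset_common(bp);
}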
5109
9f6c9258 5110void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 5111{
da5a662a 5112 int port = BP_PORT(bp);
a2fbb9ea 5113 u32 reset_code = 0;
da5a662a 5114 int i, cnt, rc;
a2fbb9ea 5115
555f6c78 5116 /* Wait until tx fastpath tasks complete */
54b9ddaa 5117 for_each_queue(bp, i) {
228241eb
ET
5118 struct bnx2x_fastpath *fp = &bp->fp[i];
5119
34f80b04 5120 cnt = 1000;
e8b5fc51 5121 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 5122
7961f791 5123 bnx2x_tx_int(fp);
34f80b04
EG
5124 if (!cnt) {
5125 BNX2X_ERR("timeout waiting for queue[%d]\n",
5126 i);
5127#ifdef BNX2X_STOP_ON_ERROR
5128 bnx2x_panic();
5129 return -EBUSY;
5130#else
5131 break;
5132#endif
5133 }
5134 cnt--;
da5a662a 5135 msleep(1);
34f80b04 5136 }
228241eb 5137 }
da5a662a
VZ
5138 /* Give HW time to discard old tx messages */
5139 msleep(1);
a2fbb9ea 5140
3101c2bc
YG
5141 if (CHIP_IS_E1(bp)) {
5142 struct mac_configuration_cmd *config =
5143 bnx2x_sp(bp, mcast_config);
5144
e665bfda 5145 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 5146
8d9c5f34 5147 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
5148 CAM_INVALIDATE(config->config_table[i]);
5149
8d9c5f34 5150 config->hdr.length = i;
3101c2bc
YG
5151 if (CHIP_REV_IS_SLOW(bp))
5152 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5153 else
5154 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 5155 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
5156 config->hdr.reserved1 = 0;
5157
e665bfda
MC
5158 bp->set_mac_pending++;
5159 smp_wmb();
5160
3101c2bc
YG
5161 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5162 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5163 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5164
5165 } else { /* E1H */
65abd74d
YG
5166 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5167
e665bfda 5168 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
5169
5170 for (i = 0; i < MC_HASH_SIZE; i++)
5171 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
5172
5173 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 5174 }
993ac7b5
MC
5175#ifdef BCM_CNIC
5176 /* Clear iSCSI L2 MAC */
5177 mutex_lock(&bp->cnic_mutex);
5178 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5179 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5180 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5181 }
5182 mutex_unlock(&bp->cnic_mutex);
5183#endif
3101c2bc 5184
65abd74d
YG
5185 if (unload_mode == UNLOAD_NORMAL)
5186 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5187
7d0446c2 5188 else if (bp->flags & NO_WOL_FLAG)
65abd74d 5189 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 5190
7d0446c2 5191 else if (bp->wol) {
65abd74d
YG
5192 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5193 u8 *mac_addr = bp->dev->dev_addr;
5194 u32 val;
5195 /* The mac address is written to entries 1-4 to
5196 preserve entry 0 which is used by the PMF */
5197 u8 entry = (BP_E1HVN(bp) + 1)*8;
5198
5199 val = (mac_addr[0] << 8) | mac_addr[1];
5200 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5201
5202 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5203 (mac_addr[4] << 8) | mac_addr[5];
5204 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5205
5206 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5207
5208 } else
5209 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5210
34f80b04
EG
5211 /* Close multi and leading connections
5212 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
5213 for_each_nondefault_queue(bp, i)
5214 if (bnx2x_stop_multi(bp, i))
228241eb 5215 goto unload_error;
a2fbb9ea 5216
da5a662a
VZ
5217 rc = bnx2x_stop_leading(bp);
5218 if (rc) {
34f80b04 5219 BNX2X_ERR("Stop leading failed!\n");
da5a662a 5220#ifdef BNX2X_STOP_ON_ERROR
34f80b04 5221 return -EBUSY;
da5a662a
VZ
5222#else
5223 goto unload_error;
34f80b04 5224#endif
228241eb
ET
5225 }
5226
5227unload_error:
34f80b04 5228 if (!BP_NOMCP(bp))
228241eb 5229 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 5230 else {
f5372251 5231 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
5232 load_count[0], load_count[1], load_count[2]);
5233 load_count[0]--;
da5a662a 5234 load_count[1 + port]--;
f5372251 5235 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
5236 load_count[0], load_count[1], load_count[2]);
5237 if (load_count[0] == 0)
5238 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 5239 else if (load_count[1 + port] == 0)
34f80b04
EG
5240 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5241 else
5242 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5243 }
a2fbb9ea 5244
34f80b04
EG
5245 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5246 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5247 bnx2x__link_reset(bp);
a2fbb9ea
ET
5248
5249 /* Reset the chip */
228241eb 5250 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
5251
5252 /* Report UNLOAD_DONE to MCP */
34f80b04 5253 if (!BP_NOMCP(bp))
a2fbb9ea 5254 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 5255
72fd0718
VZ
5256}
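
/*
 * In the WoL path above the 6-byte station address is split across two
 * EMAC MAC-match registers: a 16-bit word holding bytes 0-1 and a
 * 32-bit word holding bytes 2-5, most significant byte first.  Sketch
 * of just that packing (helper name hypothetical):
 */
static void example_pack_mac_match(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = ((u32)mac[2] << 24) | (mac[3] << 16) |
	      ((u32)mac[4] << 8) | mac[5];
}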
5257
9f6c9258 5258void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
5259{
5260 u32 val;
5261
5262 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5263
5264 if (CHIP_IS_E1(bp)) {
5265 int port = BP_PORT(bp);
5266 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5267 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5268
5269 val = REG_RD(bp, addr);
5270 val &= ~(0x300);
5271 REG_WR(bp, addr, val);
5272 } else if (CHIP_IS_E1H(bp)) {
5273 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5274 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5275 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5276 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5277 }
5278}
5279
72fd0718
VZ
5280
5281/* Close gates #2, #3 and #4: */
5282static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5283{
5284 u32 val, addr;
5285
5286 /* Gates #2 and #4a are closed/opened for "not E1" only */
5287 if (!CHIP_IS_E1(bp)) {
5288 /* #4 */
5289 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5290 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5291 close ? (val | 0x1) : (val & (~(u32)1)));
5292 /* #2 */
5293 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5294 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5295 close ? (val | 0x1) : (val & (~(u32)1)));
5296 }
5297
5298 /* #3 */
5299 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5300 val = REG_RD(bp, addr);
5301 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5302
5303 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5304 close ? "closing" : "opening");
5305 mmiowb();
5306}
5307
5308#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5309
5310static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5311{
5312 /* Do some magic... */
5313 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5314 *magic_val = val & SHARED_MF_CLP_MAGIC;
5315 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5316}
5317
5318/* Restore the value of the `magic' bit.
5319 *
5320 * @param bp Driver handle.
5321 * @param magic_val Old value of the `magic' bit.
5322 */
5323static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5324{
5325 /* Restore the `magic' bit value... */
5329 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5330 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5331 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5332}
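
/*
 * The prep/done pair above implements a save/force/restore idiom on a
 * single bit: remember the caller's `magic' bit, force it on across
 * the MCP reset, then merge the saved value back.  The same idiom on a
 * plain word, as a sketch (names hypothetical):
 */
static u32 example_force_bit(u32 word, u32 mask, u32 *saved)
{
	*saved = word & mask;		/* remember the original bit */
	return word | mask;		/* force it on */
}

static u32 example_restore_bit(u32 word, u32 mask, u32 saved)
{
	return (word & ~mask) | saved;	/* merge the old value back */
}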
5333
5334/* Prepares for MCP reset: takes care of CLP configurations.
5335 *
5336 * @param bp
5337 * @param magic_val Old value of 'magic' bit.
5338 */
5339static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5340{
5341 u32 shmem;
5342 u32 validity_offset;
5343
5344 DP(NETIF_MSG_HW, "Starting\n");
5345
5346 /* Set `magic' bit in order to save MF config */
5347 if (!CHIP_IS_E1(bp))
5348 bnx2x_clp_reset_prep(bp, magic_val);
5349
5350 /* Get shmem offset */
5351 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5352 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5353
5354 /* Clear validity map flags */
5355 if (shmem > 0)
5356 REG_WR(bp, shmem + validity_offset, 0);
5357}
5358
5359#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5360#define MCP_ONE_TIMEOUT 100 /* 100 ms */
5361
5362/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5363 * depending on the HW type.
5364 *
5365 * @param bp
5366 */
5367static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5368{
5369 /* special handling for emulation and FPGA,
5370 wait 10 times longer */
5371 if (CHIP_REV_IS_SLOW(bp))
5372 msleep(MCP_ONE_TIMEOUT*10);
5373 else
5374 msleep(MCP_ONE_TIMEOUT);
5375}
5376
5377static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5378{
5379 u32 shmem, cnt, validity_offset, val;
5380 int rc = 0;
5381
5382 msleep(100);
5383
5384 /* Get shmem offset */
5385 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5386 if (shmem == 0) {
5387 BNX2X_ERR("Shmem 0 return failure\n");
5388 rc = -ENOTTY;
5389 goto exit_lbl;
5390 }
5391
5392 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5393
5394 /* Wait for MCP to come up */
5395 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5396 /* TBD: it's best to check the validity map of the last port;
5397 * currently this checks port 0.
5398 */
5399 val = REG_RD(bp, shmem + validity_offset);
5400 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5401 shmem + validity_offset, val);
5402
5403 /* check that shared memory is valid. */
5404 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5405 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5406 break;
5407
5408 bnx2x_mcp_wait_one(bp);
5409 }
5410
5411 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5412
5413 /* Check that shared memory is valid. This indicates that MCP is up. */
5414 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5415 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5416 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5417 rc = -ENOTTY;
5418 goto exit_lbl;
5419 }
5420
5421exit_lbl:
5422 /* Restore the `magic' bit value */
5423 if (!CHIP_IS_E1(bp))
5424 bnx2x_clp_reset_done(bp, magic_val);
5425
5426 return rc;
5427}
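
/*
 * MCP liveness above is judged by two validity bits that must both be
 * set in shared memory; the poll loop simply re-reads until this
 * predicate holds or the 5s budget runs out.  The predicate on its
 * own, as a sketch:
 */
static bool example_mcp_is_up(u32 validity_map)
{
	const u32 need = SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB;

	return (validity_map & need) == need;
}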
5428
5429static void bnx2x_pxp_prep(struct bnx2x *bp)
5430{
5431 if (!CHIP_IS_E1(bp)) {
5432 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5433 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5434 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5435 mmiowb();
5436 }
5437}
5438
5439/*
5440 * Reset the whole chip except for:
5441 * - PCIE core
5442 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5443 * one reset bit)
5444 * - IGU
5445 * - MISC (including AEU)
5446 * - GRC
5447 * - RBCN, RBCP
5448 */
5449static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5450{
5451 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5452
5453 not_reset_mask1 =
5454 MISC_REGISTERS_RESET_REG_1_RST_HC |
5455 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5456 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5457
5458 not_reset_mask2 =
5459 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5460 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5461 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5462 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5463 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5464 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5465 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5466 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5467
5468 reset_mask1 = 0xffffffff;
5469
5470 if (CHIP_IS_E1(bp))
5471 reset_mask2 = 0xffff;
5472 else
5473 reset_mask2 = 0x1ffff;
5474
5475 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5476 reset_mask1 & (~not_reset_mask1));
5477 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5478 reset_mask2 & (~not_reset_mask2));
5479
5480 barrier();
5481 mmiowb();
5482
5483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5484 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5485 mmiowb();
5486}
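
/*
 * Sketch of the mask arithmetic above.  Judging by the "take the NIG
 * out of reset" comment later in this file, the _CLEAR register
 * asserts reset for the written bits and the _SET register releases
 * it; so everything except the keep-alive blocks is put into reset
 * first, and then all blocks are released:
 */
static u32 example_reset_assert_value(u32 all_bits, u32 keep_mask)
{
	return all_bits & ~keep_mask;	/* written to the _CLEAR register */
}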
5487
5488static int bnx2x_process_kill(struct bnx2x *bp)
5489{
5490 int cnt = 1000;
5491 u32 val = 0;
5492 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5493
5494
5495 /* Empty the Tetris buffer, wait for 1s */
5496 do {
5497 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5498 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5499 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5500 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5501 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5502 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5503 ((port_is_idle_0 & 0x1) == 0x1) &&
5504 ((port_is_idle_1 & 0x1) == 0x1) &&
5505 (pgl_exp_rom2 == 0xffffffff))
5506 break;
5507 msleep(1);
5508 } while (cnt-- > 0);
5509
5510 if (cnt <= 0) {
5511 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5512 " are still"
5513 " outstanding read requests after 1s!\n");
5514 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5515 " port_is_idle_0=0x%08x,"
5516 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5517 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5518 pgl_exp_rom2);
5519 return -EAGAIN;
5520 }
5521
5522 barrier();
5523
5524 /* Close gates #2, #3 and #4 */
5525 bnx2x_set_234_gates(bp, true);
5526
5527 /* TBD: Indicate that "process kill" is in progress to MCP */
5528
5529 /* Clear "unprepared" bit */
5530 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5531 barrier();
5532
5533 /* Make sure all is written to the chip before the reset */
5534 mmiowb();
5535
5536 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5537 * PSWHST, GRC and PSWRD Tetris buffer.
5538 */
5539 msleep(1);
5540
5541 /* Prepare to chip reset: */
5542 /* MCP */
5543 bnx2x_reset_mcp_prep(bp, &val);
5544
5545 /* PXP */
5546 bnx2x_pxp_prep(bp);
5547 barrier();
5548
5549 /* reset the chip */
5550 bnx2x_process_kill_chip_reset(bp);
5551 barrier();
5552
5553 /* Recover after reset: */
5554 /* MCP */
5555 if (bnx2x_reset_mcp_comp(bp, val))
5556 return -EAGAIN;
5557
5558 /* PXP */
5559 bnx2x_pxp_prep(bp);
5560
5561 /* Open the gates #2, #3 and #4 */
5562 bnx2x_set_234_gates(bp, false);
5563
5564 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
5565 * reset state, re-enable attentions. */
5566
a2fbb9ea
ET
5567 return 0;
5568}
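
/*
 * The 1s drain loop above waits for four PXP2 indications to reach
 * known idle values; the expected constants below are taken verbatim
 * from that loop.  The exit condition, as a standalone sketch:
 */
static bool example_pxp_idle(u32 sr_cnt, u32 blk_cnt,
			     u32 idle0, u32 idle1, u32 exp_rom2)
{
	return sr_cnt == 0x7e && blk_cnt == 0xa0 &&
	       (idle0 & 0x1) && (idle1 & 0x1) &&
	       exp_rom2 == 0xffffffff;
}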
5569
72fd0718
VZ
5570static int bnx2x_leader_reset(struct bnx2x *bp)
5571{
5572 int rc = 0;
5573 /* Try to recover after the failure */
5574 if (bnx2x_process_kill(bp)) {
5575 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5576 bp->dev->name);
5577 rc = -EAGAIN;
5578 goto exit_leader_reset;
5579 }
5580
5581 /* Clear "reset is in progress" bit and update the driver state */
5582 bnx2x_set_reset_done(bp);
5583 bp->recovery_state = BNX2X_RECOVERY_DONE;
5584
5585exit_leader_reset:
5586 bp->is_leader = 0;
5587 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5588 smp_wmb();
5589 return rc;
5590}
5591
72fd0718
VZ
5592/* Assumption: runs under rtnl lock. This together with the fact
5593 * that it's called only from bnx2x_reset_task() ensures that it
5594 * will never be called when netif_running(bp->dev) is false.
5595 */
5596static void bnx2x_parity_recover(struct bnx2x *bp)
5597{
5598 DP(NETIF_MSG_HW, "Handling parity\n");
5599 while (1) {
5600 switch (bp->recovery_state) {
5601 case BNX2X_RECOVERY_INIT:
5602 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5603 /* Try to get a LEADER_LOCK HW lock */
5604 if (bnx2x_trylock_hw_lock(bp,
5605 HW_LOCK_RESOURCE_RESERVED_08))
5606 bp->is_leader = 1;
5607
5608 /* Stop the driver */
5609 /* If interface has been removed - break */
5610 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5611 return;
5612
5613 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5614 /* Ensure "is_leader" and "recovery_state"
5615 * update values are seen on other CPUs
5616 */
5617 smp_wmb();
5618 break;
5619
5620 case BNX2X_RECOVERY_WAIT:
5621 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5622 if (bp->is_leader) {
5623 u32 load_counter = bnx2x_get_load_cnt(bp);
5624 if (load_counter) {
5625 /* Wait until all other functions get
5626 * down.
5627 */
5628 schedule_delayed_work(&bp->reset_task,
5629 HZ/10);
5630 return;
5631 } else {
5632 /* If all other functions got down -
5633 * try to bring the chip back to
5634 * normal. In any case it's an exit
5635 * point for a leader.
5636 */
5637 if (bnx2x_leader_reset(bp) ||
5638 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5639 printk(KERN_ERR"%s: Recovery "
5640 "has failed. Power cycle is "
5641 "needed.\n", bp->dev->name);
5642 /* Disconnect this device */
5643 netif_device_detach(bp->dev);
5644 /* Block ifup for all function
5645 * of this ASIC until
5646 * "process kill" or power
5647 * cycle.
5648 */
5649 bnx2x_set_reset_in_progress(bp);
5650 /* Shut down the power */
5651 bnx2x_set_power_state(bp,
5652 PCI_D3hot);
5653 return;
5654 }
5655
5656 return;
5657 }
5658 } else { /* non-leader */
5659 if (!bnx2x_reset_is_done(bp)) {
5660 /* Keep trying to get the LEADER_LOCK
5661 * HW lock, since a former leader may
5662 * have been unloaded by the user or
5663 * may have released the leadership for
5664 * some other reason.
5665 */
5666 if (bnx2x_trylock_hw_lock(bp,
5667 HW_LOCK_RESOURCE_RESERVED_08)) {
5668 /* I'm a leader now! Restart a
5669 * switch case.
5670 */
5671 bp->is_leader = 1;
5672 break;
5673 }
5674
5675 schedule_delayed_work(&bp->reset_task,
5676 HZ/10);
5677 return;
5678
5679 } else { /* A leader has completed
5680 * the "process kill". It's an exit
5681 * point for a non-leader.
5682 */
5683 bnx2x_nic_load(bp, LOAD_NORMAL);
5684 bp->recovery_state =
5685 BNX2X_RECOVERY_DONE;
5686 smp_wmb();
5687 return;
5688 }
5689 }
5690 default:
5691 return;
5692 }
5693 }
5694}
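
/*
 * Recovery above elects a leader with a hardware trylock: whichever
 * function grabs HW_LOCK_RESOURCE_RESERVED_08 drives the global
 * "process kill" while the others reschedule themselves until
 * reset-done is signalled.  The election step in isolation, as a
 * sketch (flow simplified, names from the code above):
 */
static void example_elect_leader(struct bnx2x *bp)
{
	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08))
		bp->is_leader = 1;	/* this function drives the reset */
	/* non-leaders poll bnx2x_reset_is_done(bp) instead */
}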
5695
5696/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5697 * scheduled on a general queue in order to prevent a deadlock.
5698 */
34f80b04
EG
5699static void bnx2x_reset_task(struct work_struct *work)
5700{
72fd0718 5701 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
5702
5703#ifdef BNX2X_STOP_ON_ERROR
5704 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5705 " so reset not done to allow debug dump,\n"
72fd0718 5706 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
5707 return;
5708#endif
5709
5710 rtnl_lock();
5711
5712 if (!netif_running(bp->dev))
5713 goto reset_task_exit;
5714
72fd0718
VZ
5715 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5716 bnx2x_parity_recover(bp);
5717 else {
5718 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5719 bnx2x_nic_load(bp, LOAD_NORMAL);
5720 }
34f80b04
EG
5721
5722reset_task_exit:
5723 rtnl_unlock();
5724}
5725
a2fbb9ea
ET
5726/* end of nic load/unload */
5727
a2fbb9ea
ET
5728/*
5729 * Init service functions
5730 */
5731
f1ef27ef
EG
5732static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5733{
5734 switch (func) {
5735 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5736 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5737 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5738 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5739 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5740 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5741 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5742 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5743 default:
5744 BNX2X_ERR("Unsupported function index: %d\n", func);
5745 return (u32)(-1);
5746 }
5747}
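
/*
 * The switch above is a plain index-to-register lookup; an equivalent
 * table form over the same eight PXP2 pretend registers, as a sketch
 * (helper name hypothetical):
 */
static inline u32 example_pretend_reg(int func)
{
	static const u32 regs[8] = {
		PXP2_REG_PGL_PRETEND_FUNC_F0, PXP2_REG_PGL_PRETEND_FUNC_F1,
		PXP2_REG_PGL_PRETEND_FUNC_F2, PXP2_REG_PGL_PRETEND_FUNC_F3,
		PXP2_REG_PGL_PRETEND_FUNC_F4, PXP2_REG_PGL_PRETEND_FUNC_F5,
		PXP2_REG_PGL_PRETEND_FUNC_F6, PXP2_REG_PGL_PRETEND_FUNC_F7,
	};

	return (func >= 0 && func < 8) ? regs[func] : (u32)-1;
}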
5748
5749static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5750{
5751 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5752
5753 /* Flush all outstanding writes */
5754 mmiowb();
5755
5756 /* Pretend to be function 0 */
5757 REG_WR(bp, reg, 0);
5758 /* Flush the GRC transaction (in the chip) */
5759 new_val = REG_RD(bp, reg);
5760 if (new_val != 0) {
5761 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5762 new_val);
5763 BUG();
5764 }
5765
5766 /* From now we are in the "like-E1" mode */
5767 bnx2x_int_disable(bp);
5768
5769 /* Flush all outstanding writes */
5770 mmiowb();
5771
5772 /* Restore the original function settings */
5773 REG_WR(bp, reg, orig_func);
5774 new_val = REG_RD(bp, reg);
5775 if (new_val != orig_func) {
5776 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5777 orig_func, new_val);
5778 BUG();
5779 }
5780}
5781
5782static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5783{
5784 if (CHIP_IS_E1H(bp))
5785 bnx2x_undi_int_disable_e1h(bp, func);
5786 else
5787 bnx2x_int_disable(bp);
5788}
5789
34f80b04
EG
5790static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5791{
5792 u32 val;
5793
5794 /* Check if there is any driver already loaded */
5795 val = REG_RD(bp, MISC_REG_UNPREPARED);
5796 if (val == 0x1) {
5797 /* Check if it is the UNDI driver
5798 * UNDI driver initializes CID offset for normal bell to 0x7
5799 */
4a37fb66 5800 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5801 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5802 if (val == 0x7) {
5803 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5804 /* save our func */
34f80b04 5805 int func = BP_FUNC(bp);
da5a662a
VZ
5806 u32 swap_en;
5807 u32 swap_val;
34f80b04 5808
b4661739
EG
5809 /* clear the UNDI indication */
5810 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5811
34f80b04
EG
5812 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5813
5814 /* try unload UNDI on port 0 */
5815 bp->func = 0;
da5a662a
VZ
5816 bp->fw_seq =
5817 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5818 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 5819 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
5820
5821 /* if UNDI is loaded on the other port */
5822 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5823
da5a662a
VZ
5824 /* send "DONE" for previous unload */
5825 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5826
5827 /* unload UNDI on port 1 */
34f80b04 5828 bp->func = 1;
da5a662a
VZ
5829 bp->fw_seq =
5830 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5831 DRV_MSG_SEQ_NUMBER_MASK);
5832 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5833
5834 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
5835 }
5836
b4661739
EG
5837 /* now it's safe to release the lock */
5838 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5839
f1ef27ef 5840 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
5841
5842 /* close input traffic and wait for it */
5843 /* Do not rcv packets to BRB */
5844 REG_WR(bp,
5845 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5846 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5847 /* Do not direct rcv packets that are not for MCP to
5848 * the BRB */
5849 REG_WR(bp,
5850 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5851 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5852 /* clear AEU */
5853 REG_WR(bp,
5854 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5855 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5856 msleep(10);
5857
5858 /* save NIG port swap info */
5859 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5860 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
5861 /* reset device */
5862 REG_WR(bp,
5863 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 5864 0xd3ffffff);
34f80b04
EG
5865 REG_WR(bp,
5866 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5867 0x1403);
da5a662a
VZ
5868 /* take the NIG out of reset and restore swap values */
5869 REG_WR(bp,
5870 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5871 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5872 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5873 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5874
5875 /* send unload done to the MCP */
5876 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5877
5878 /* restore our func and fw_seq */
5879 bp->func = func;
5880 bp->fw_seq =
5881 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5882 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
5883
5884 } else
5885 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5886 }
5887}
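
/*
 * UNDI detection above boils down to two register reads: the chip is
 * still "unprepared" and the normal-doorbell CID offset carries the
 * value 0x7 that UNDI programs.  The predicate in isolation, as a
 * sketch (note the real code reads DORQ_REG_NORM_CID_OFST only under
 * the UNDI hardware lock):
 */
static bool example_undi_active(struct bnx2x *bp)
{
	return REG_RD(bp, MISC_REG_UNPREPARED) == 0x1 &&
	       REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7;
}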
5888
5889static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5890{
5891 u32 val, val2, val3, val4, id;
72ce58c3 5892 u16 pmc;
34f80b04
EG
5893
5894 /* Get the chip revision id and number. */
5895 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5896 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5897 id = ((val & 0xffff) << 16);
5898 val = REG_RD(bp, MISC_REG_CHIP_REV);
5899 id |= ((val & 0xf) << 12);
5900 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5901 id |= ((val & 0xff) << 4);
5a40e08e 5902 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
5903 id |= (val & 0xf);
5904 bp->common.chip_id = id;
5905 bp->link_params.chip_id = bp->common.chip_id;
5906 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5907
1c06328c
EG
5908 val = (REG_RD(bp, 0x2874) & 0x55);
5909 if ((bp->common.chip_id & 0x1) ||
5910 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5911 bp->flags |= ONE_PORT_FLAG;
5912 BNX2X_DEV_INFO("single port device\n");
5913 }
5914
34f80b04
EG
5915 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5916 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5917 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5918 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5919 bp->common.flash_size, bp->common.flash_size);
5920
5921 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 5922 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 5923 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
5924 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
5925 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
5926
5927 if (!bp->common.shmem_base ||
5928 (bp->common.shmem_base < 0xA0000) ||
5929 (bp->common.shmem_base >= 0xC0000)) {
5930 BNX2X_DEV_INFO("MCP not active\n");
5931 bp->flags |= NO_MCP_FLAG;
5932 return;
5933 }
5934
5935 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5936 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5937 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 5938 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
5939
5940 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 5941 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
5942
5943 bp->link_params.hw_led_mode = ((bp->common.hw_config &
5944 SHARED_HW_CFG_LED_MODE_MASK) >>
5945 SHARED_HW_CFG_LED_MODE_SHIFT);
5946
c2c8b03e
EG
5947 bp->link_params.feature_config_flags = 0;
5948 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
5949 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
5950 bp->link_params.feature_config_flags |=
5951 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
5952 else
5953 bp->link_params.feature_config_flags &=
5954 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
5955
34f80b04
EG
5956 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
5957 bp->common.bc_ver = val;
5958 BNX2X_DEV_INFO("bc_ver %X\n", val);
5959 if (val < BNX2X_BC_VER) {
5960 /* for now only warn;
5961 * later we might need to enforce this */
cdaa7cb8
VZ
5962 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
5963 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 5964 }
4d295db0
EG
5965 bp->link_params.feature_config_flags |=
5966 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
5967 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
5968
5969 if (BP_E1HVN(bp) == 0) {
5970 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
5971 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
5972 } else {
5973 /* no WOL capability for E1HVN != 0 */
5974 bp->flags |= NO_WOL_FLAG;
5975 }
5976 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 5977 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
5978
5979 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
5980 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
5981 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
5982 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
5983
cdaa7cb8
VZ
5984 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
5985 val, val2, val3, val4);
34f80b04
EG
5986}
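
/*
 * Sketch of the chip-id packing assembled above: chip number in bits
 * 16-31, revision in 12-15, metal in 4-11, bond id in 0-3 (helper
 * name hypothetical):
 */
static u32 example_pack_chip_id(u32 num, u32 rev, u32 metal, u32 bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}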
5987
5988static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
5989 u32 switch_cfg)
a2fbb9ea 5990{
34f80b04 5991 int port = BP_PORT(bp);
b7737c9b
YR
5992 bp->port.supported = 0;
5993 switch (bp->link_params.num_phys) {
5994 case 1:
5995 bp->port.supported = bp->link_params.phy[INT_PHY].supported;
a2fbb9ea 5996 break;
b7737c9b
YR
5997 case 2:
5998 bp->port.supported = bp->link_params.phy[EXT_PHY1].supported;
a2fbb9ea 5999 break;
b7737c9b 6000 }
a2fbb9ea 6001
b7737c9b
YR
6002 if (!(bp->port.supported)) {
6003 BNX2X_ERR("NVRAM config error. BAD phy config."
6004 "PHY1 config 0x%x\n",
6005 SHMEM_RD(bp,
6006 dev_info.port_hw_config[port].external_phy_config));
a2fbb9ea
ET
6007 return;
6008 }
6009
b7737c9b
YR
6010 switch (switch_cfg) {
6011 case SWITCH_CFG_1G:
34f80b04
EG
6012 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6013 port*0x10);
6014 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6015 break;
6016
6017 case SWITCH_CFG_10G:
34f80b04
EG
6018 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6019 port*0x18);
6020 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 6021
a2fbb9ea
ET
6022 break;
6023
6024 default:
6025 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 6026 bp->port.link_config);
a2fbb9ea
ET
6027 return;
6028 }
a2fbb9ea 6029 /* mask what we support according to speed_cap_mask */
c18487ee
YR
6030 if (!(bp->link_params.speed_cap_mask &
6031 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 6032 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 6033
c18487ee
YR
6034 if (!(bp->link_params.speed_cap_mask &
6035 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 6036 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 6037
c18487ee
YR
6038 if (!(bp->link_params.speed_cap_mask &
6039 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 6040 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 6041
c18487ee
YR
6042 if (!(bp->link_params.speed_cap_mask &
6043 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 6044 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 6045
c18487ee
YR
6046 if (!(bp->link_params.speed_cap_mask &
6047 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
6048 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6049 SUPPORTED_1000baseT_Full);
a2fbb9ea 6050
c18487ee
YR
6051 if (!(bp->link_params.speed_cap_mask &
6052 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 6053 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 6054
c18487ee
YR
6055 if (!(bp->link_params.speed_cap_mask &
6056 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 6057 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 6058
34f80b04 6059 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
6060}
6061
34f80b04 6062static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6063{
c18487ee 6064 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 6065
34f80b04 6066 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6067 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 6068 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 6069 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6070 bp->port.advertising = bp->port.supported;
a2fbb9ea 6071 } else {
a2fbb9ea 6072 /* force 10G, no AN */
c18487ee 6073 bp->link_params.req_line_speed = SPEED_10000;
b7737c9b 6074 bp->port.advertising = (ADVERTISED_10000baseT_Full |
a2fbb9ea 6075 ADVERTISED_FIBRE);
a2fbb9ea
ET
6076 }
6077 break;
6078
6079 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 6080 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 6081 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
6082 bp->port.advertising = (ADVERTISED_10baseT_Full |
6083 ADVERTISED_TP);
a2fbb9ea 6084 } else {
cdaa7cb8
VZ
6085 BNX2X_ERROR("NVRAM config error. "
6086 "Invalid link_config 0x%x"
6087 " speed_cap_mask 0x%x\n",
6088 bp->port.link_config,
6089 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6090 return;
6091 }
6092 break;
6093
6094 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 6095 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
6096 bp->link_params.req_line_speed = SPEED_10;
6097 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6098 bp->port.advertising = (ADVERTISED_10baseT_Half |
6099 ADVERTISED_TP);
a2fbb9ea 6100 } else {
cdaa7cb8
VZ
6101 BNX2X_ERROR("NVRAM config error. "
6102 "Invalid link_config 0x%x"
6103 " speed_cap_mask 0x%x\n",
6104 bp->port.link_config,
6105 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6106 return;
6107 }
6108 break;
6109
6110 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 6111 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 6112 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
6113 bp->port.advertising = (ADVERTISED_100baseT_Full |
6114 ADVERTISED_TP);
a2fbb9ea 6115 } else {
cdaa7cb8
VZ
6116 BNX2X_ERROR("NVRAM config error. "
6117 "Invalid link_config 0x%x"
6118 " speed_cap_mask 0x%x\n",
6119 bp->port.link_config,
6120 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6121 return;
6122 }
6123 break;
6124
6125 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 6126 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
6127 bp->link_params.req_line_speed = SPEED_100;
6128 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6129 bp->port.advertising = (ADVERTISED_100baseT_Half |
6130 ADVERTISED_TP);
a2fbb9ea 6131 } else {
cdaa7cb8
VZ
6132 BNX2X_ERROR("NVRAM config error. "
6133 "Invalid link_config 0x%x"
6134 " speed_cap_mask 0x%x\n",
6135 bp->port.link_config,
6136 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6137 return;
6138 }
6139 break;
6140
6141 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 6142 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 6143 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
6144 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6145 ADVERTISED_TP);
a2fbb9ea 6146 } else {
cdaa7cb8
VZ
6147 BNX2X_ERROR("NVRAM config error. "
6148 "Invalid link_config 0x%x"
6149 " speed_cap_mask 0x%x\n",
6150 bp->port.link_config,
6151 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6152 return;
6153 }
6154 break;
6155
6156 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 6157 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 6158 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
6159 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6160 ADVERTISED_TP);
a2fbb9ea 6161 } else {
cdaa7cb8
VZ
6162 BNX2X_ERROR("NVRAM config error. "
6163 "Invalid link_config 0x%x"
6164 " speed_cap_mask 0x%x\n",
6165 bp->port.link_config,
6166 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6167 return;
6168 }
6169 break;
6170
6171 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6172 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6173 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 6174 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 6175 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
6176 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6177 ADVERTISED_FIBRE);
a2fbb9ea 6178 } else {
cdaa7cb8
VZ
6179 BNX2X_ERROR("NVRAM config error. "
6180 "Invalid link_config 0x%x"
6181 " speed_cap_mask 0x%x\n",
6182 bp->port.link_config,
6183 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6184 return;
6185 }
6186 break;
6187
6188 default:
cdaa7cb8
VZ
6189 BNX2X_ERROR("NVRAM config error. "
6190 "BAD link speed link_config 0x%x\n",
6191 bp->port.link_config);
c18487ee 6192 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6193 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
6194 break;
6195 }
a2fbb9ea 6196
34f80b04
EG
6197 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6198 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 6199 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 6200 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 6201 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 6202
c18487ee 6203 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 6204 " advertising 0x%x\n",
c18487ee
YR
6205 bp->link_params.req_line_speed,
6206 bp->link_params.req_duplex,
34f80b04 6207 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
6208}
6209
e665bfda
MC
6210static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6211{
6212 mac_hi = cpu_to_be16(mac_hi);
6213 mac_lo = cpu_to_be32(mac_lo);
6214 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6215 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6216}
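
/*
 * The helper above leans on cpu_to_be16/cpu_to_be32 so that the two
 * memcpy calls lay the address out byte 0 first regardless of host
 * endianness.  An explicit, endian-free equivalent, as a sketch:
 */
static void example_set_mac_buf(u8 *mac, u32 lo, u16 hi)
{
	mac[0] = hi >> 8;
	mac[1] = hi & 0xff;
	mac[2] = lo >> 24;
	mac[3] = (lo >> 16) & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = lo & 0xff;
}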
6217
34f80b04 6218static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 6219{
34f80b04
EG
6220 int port = BP_PORT(bp);
6221 u32 val, val2;
589abe3a 6222 u32 config;
b7737c9b 6223 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 6224
c18487ee 6225 bp->link_params.bp = bp;
34f80b04 6226 bp->link_params.port = port;
c18487ee 6227
c18487ee 6228 bp->link_params.lane_config =
a2fbb9ea 6229 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 6230
c18487ee 6231 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
6232 SHMEM_RD(bp,
6233 dev_info.port_hw_config[port].speed_capability_mask);
6234
34f80b04 6235 bp->port.link_config =
a2fbb9ea
ET
6236 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6237
c2c8b03e 6238
3ce2c3f9
EG
6239 /* If the device is capable of WoL, set the default state according
6240 * to the HW
6241 */
4d295db0 6242 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
6243 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6244 (config & PORT_FEATURE_WOL_ENABLED));
6245
b7737c9b 6246 BNX2X_DEV_INFO("lane_config 0x%08x"
c2c8b03e 6247 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee 6248 bp->link_params.lane_config,
34f80b04 6249 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 6250
4d295db0
EG
6251 bp->link_params.switch_cfg |= (bp->port.link_config &
6252 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 6253 bnx2x_phy_probe(&bp->link_params);
c18487ee 6254 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
6255
6256 bnx2x_link_settings_requested(bp);
6257
01cd4528
EG
6258 /*
6259 * If connected directly, work with the internal PHY, otherwise, work
6260 * with the external PHY
6261 */
b7737c9b
YR
6262 ext_phy_config =
6263 SHMEM_RD(bp,
6264 dev_info.port_hw_config[port].external_phy_config);
6265 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 6266 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 6267 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
6268
6269 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6270 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6271 bp->mdio.prtad =
b7737c9b 6272 XGXS_EXT_PHY_ADDR(ext_phy_config);
01cd4528 6273
a2fbb9ea
ET
6274 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6275 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 6276 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
6277 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6278 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
6279
6280#ifdef BCM_CNIC
6281 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6282 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6283 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6284#endif
34f80b04
EG
6285}
6286
6287static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6288{
6289 int func = BP_FUNC(bp);
6290 u32 val, val2;
6291 int rc = 0;
a2fbb9ea 6292
34f80b04 6293 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 6294
34f80b04
EG
6295 bp->e1hov = 0;
6296 bp->e1hmf = 0;
2145a920 6297 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
6298 bp->mf_config =
6299 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 6300
2691d51d 6301 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 6302 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 6303 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 6304 bp->e1hmf = 1;
2691d51d
EG
6305 BNX2X_DEV_INFO("%s function mode\n",
6306 IS_E1HMF(bp) ? "multi" : "single");
6307
6308 if (IS_E1HMF(bp)) {
6309 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6310 e1hov_tag) &
6311 FUNC_MF_CFG_E1HOV_TAG_MASK);
6312 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6313 bp->e1hov = val;
6314 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6315 "(0x%04x)\n",
6316 func, bp->e1hov, bp->e1hov);
6317 } else {
cdaa7cb8
VZ
6318 BNX2X_ERROR("No valid E1HOV for func %d,"
6319 " aborting\n", func);
34f80b04
EG
6320 rc = -EPERM;
6321 }
2691d51d
EG
6322 } else {
6323 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
6324 BNX2X_ERROR("VN %d in single function mode,"
6325 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
6326 rc = -EPERM;
6327 }
34f80b04
EG
6328 }
6329 }
a2fbb9ea 6330
34f80b04
EG
6331 if (!BP_NOMCP(bp)) {
6332 bnx2x_get_port_hwinfo(bp);
6333
6334 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6335 DRV_MSG_SEQ_NUMBER_MASK);
6336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6337 }
6338
6339 if (IS_E1HMF(bp)) {
6340 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6341 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6342 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6343 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6344 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6345 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6346 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6347 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6348 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6349 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6350 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6351 ETH_ALEN);
6352 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6353 ETH_ALEN);
a2fbb9ea 6354 }
34f80b04
EG
6355
6356 return rc;
a2fbb9ea
ET
6357 }
6358
34f80b04
EG
6359 if (BP_NOMCP(bp)) {
6360 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 6361 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
6362 random_ether_addr(bp->dev->dev_addr);
6363 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6364 }
a2fbb9ea 6365
34f80b04
EG
6366 return rc;
6367}
6368
34f24c7f
VZ
6369static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6370{
6371 int cnt, i, block_end, rodi;
6372 char vpd_data[BNX2X_VPD_LEN+1];
6373 char str_id_reg[VENDOR_ID_LEN+1];
6374 char str_id_cap[VENDOR_ID_LEN+1];
6375 u8 len;
6376
6377 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6378 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6379
6380 if (cnt < BNX2X_VPD_LEN)
6381 goto out_not_found;
6382
6383 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6384 PCI_VPD_LRDT_RO_DATA);
6385 if (i < 0)
6386 goto out_not_found;
6387
6388
6389 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6390 pci_vpd_lrdt_size(&vpd_data[i]);
6391
6392 i += PCI_VPD_LRDT_TAG_SIZE;
6393
6394 if (block_end > BNX2X_VPD_LEN)
6395 goto out_not_found;
6396
6397 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6398 PCI_VPD_RO_KEYWORD_MFR_ID);
6399 if (rodi < 0)
6400 goto out_not_found;
6401
6402 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6403
6404 if (len != VENDOR_ID_LEN)
6405 goto out_not_found;
6406
6407 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6408
6409 /* vendor specific info */
6410 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6411 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6412 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6413 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6414
6415 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6416 PCI_VPD_RO_KEYWORD_VENDOR0);
6417 if (rodi >= 0) {
6418 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6419
6420 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6421
6422 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6423 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6424 bp->fw_ver[len] = ' ';
6425 }
6426 }
6427 return;
6428 }
6429out_not_found:
6430 return;
6431}
6432
34f80b04
EG
6433static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6434{
6435 int func = BP_FUNC(bp);
87942b46 6436 int timer_interval;
34f80b04
EG
6437 int rc;
6438
da5a662a
VZ
6439 /* Disable interrupt handling until HW is initialized */
6440 atomic_set(&bp->intr_sem, 1);
e1510706 6441 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 6442
34f80b04 6443 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 6444 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 6445 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
6446#ifdef BCM_CNIC
6447 mutex_init(&bp->cnic_mutex);
6448#endif
a2fbb9ea 6449
1cf167f2 6450 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 6451 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
6452
6453 rc = bnx2x_get_hwinfo(bp);
6454
34f24c7f 6455 bnx2x_read_fwinfo(bp);
34f80b04
EG
6456 /* need to reset chip if undi was active */
6457 if (!BP_NOMCP(bp))
6458 bnx2x_undi_unload(bp);
6459
6460 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 6461 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
6462
6463 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
6464 dev_err(&bp->pdev->dev, "MCP disabled, "
6465 "must load devices in order!\n");
34f80b04 6466
555f6c78 6467 /* Set multi queue mode */
8badd27a
EG
6468 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6469 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
6470 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6471 "requested is not MSI-X\n");
555f6c78
EG
6472 multi_mode = ETH_RSS_MODE_DISABLED;
6473 }
6474 bp->multi_mode = multi_mode;
5d7cd496 6475 bp->int_mode = int_mode;
555f6c78 6476
4fd89b7a
DK
6477 bp->dev->features |= NETIF_F_GRO;
6478
7a9b2557
VZ
6479 /* Set TPA flags */
6480 if (disable_tpa) {
6481 bp->flags &= ~TPA_ENABLE_FLAG;
6482 bp->dev->features &= ~NETIF_F_LRO;
6483 } else {
6484 bp->flags |= TPA_ENABLE_FLAG;
6485 bp->dev->features |= NETIF_F_LRO;
6486 }
5d7cd496 6487 bp->disable_tpa = disable_tpa;
7a9b2557 6488
a18f5128
EG
6489 if (CHIP_IS_E1(bp))
6490 bp->dropless_fc = 0;
6491 else
6492 bp->dropless_fc = dropless_fc;
6493
8d5726c4 6494 bp->mrrs = mrrs;
7a9b2557 6495
34f80b04
EG
6496 bp->tx_ring_size = MAX_TX_AVAIL;
6497 bp->rx_ring_size = MAX_RX_AVAIL;
6498
6499 bp->rx_csum = 1;
34f80b04 6500
7d323bfd
EG
6501 /* make sure that the numbers are in the right granularity */
6502 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6503 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 6504
87942b46
EG
6505 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6506 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
6507
6508 init_timer(&bp->timer);
6509 bp->timer.expires = jiffies + bp->current_interval;
6510 bp->timer.data = (unsigned long) bp;
6511 bp->timer.function = bnx2x_timer;
6512
6513 return rc;
a2fbb9ea
ET
6514}
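
/*
 * The tick setup above rounds the default tx/rx tick values down to a
 * multiple of 4*BNX2X_BTR, the granularity the coalescing timer works
 * in.  The generic form of that integer trick, as a sketch:
 */
static inline int example_round_down_to(int value, int gran)
{
	return (value / gran) * gran;
}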
6515
a2fbb9ea 6516
de0c62db
DK
6517/****************************************************************************
6518* General service functions
6519****************************************************************************/
a2fbb9ea 6520
bb2a0f7a 6521/* called with rtnl_lock */
a2fbb9ea
ET
6522static int bnx2x_open(struct net_device *dev)
6523{
6524 struct bnx2x *bp = netdev_priv(dev);
6525
6eccabb3
EG
6526 netif_carrier_off(dev);
6527
a2fbb9ea
ET
6528 bnx2x_set_power_state(bp, PCI_D0);
6529
72fd0718
VZ
6530 if (!bnx2x_reset_is_done(bp)) {
6531 do {
6532 /* Reset the MCP mailbox sequence if there is an ongoing
6533 * recovery
6534 */
6535 bp->fw_seq = 0;
6536
6537 /* If this is the first function to load and reset-done
6538 * is still not cleared, a recovery may still be pending. We don't
6539 * check the attention state here because it may have
6540 * already been cleared by a "common" reset, but we
6541 * shall proceed with "process kill" anyway.
6542 */
6543 if ((bnx2x_get_load_cnt(bp) == 0) &&
6544 bnx2x_trylock_hw_lock(bp,
6545 HW_LOCK_RESOURCE_RESERVED_08) &&
6546 (!bnx2x_leader_reset(bp))) {
6547 DP(NETIF_MSG_HW, "Recovered in open\n");
6548 break;
6549 }
6550
6551 bnx2x_set_power_state(bp, PCI_D3hot);
6552
6553 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6554 " completed yet. Try again later. If u still see this"
6555 " message after a few retries then power cycle is"
6556 " required.\n", bp->dev->name);
6557
6558 return -EAGAIN;
6559 } while (0);
6560 }
6561
6562 bp->recovery_state = BNX2X_RECOVERY_DONE;
6563
bb2a0f7a 6564 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
6565}
6566
bb2a0f7a 6567/* called with rtnl_lock */
a2fbb9ea
ET
6568static int bnx2x_close(struct net_device *dev)
6569{
a2fbb9ea
ET
6570 struct bnx2x *bp = netdev_priv(dev);
6571
6572 /* Unload the driver, release IRQs */
bb2a0f7a 6573 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 6574 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
6575
6576 return 0;
6577}
6578
f5372251 6579/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 6580void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
6581{
6582 struct bnx2x *bp = netdev_priv(dev);
6583 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6584 int port = BP_PORT(bp);
6585
6586 if (bp->state != BNX2X_STATE_OPEN) {
6587 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6588 return;
6589 }
6590
6591 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6592
6593 if (dev->flags & IFF_PROMISC)
6594 rx_mode = BNX2X_RX_MODE_PROMISC;
6595
6596 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
6597 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6598 CHIP_IS_E1(bp)))
34f80b04
EG
6599 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6600
6601 else { /* some multicasts */
6602 if (CHIP_IS_E1(bp)) {
6603 int i, old, offset;
22bedad3 6604 struct netdev_hw_addr *ha;
34f80b04
EG
6605 struct mac_configuration_cmd *config =
6606 bnx2x_sp(bp, mcast_config);
6607
0ddf477b 6608 i = 0;
22bedad3 6609 netdev_for_each_mc_addr(ha, dev) {
34f80b04
EG
6610 config->config_table[i].
6611 cam_entry.msb_mac_addr =
22bedad3 6612 swab16(*(u16 *)&ha->addr[0]);
34f80b04
EG
6613 config->config_table[i].
6614 cam_entry.middle_mac_addr =
22bedad3 6615 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
6616 config->config_table[i].
6617 cam_entry.lsb_mac_addr =
22bedad3 6618 swab16(*(u16 *)&ha->addr[4]);
34f80b04
EG
6619 config->config_table[i].cam_entry.flags =
6620 cpu_to_le16(port);
6621 config->config_table[i].
6622 target_table_entry.flags = 0;
ca00392c
EG
6623 config->config_table[i].target_table_entry.
6624 clients_bit_vector =
6625 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
6626 config->config_table[i].
6627 target_table_entry.vlan_id = 0;
6628
6629 DP(NETIF_MSG_IFUP,
6630 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6631 config->config_table[i].
6632 cam_entry.msb_mac_addr,
6633 config->config_table[i].
6634 cam_entry.middle_mac_addr,
6635 config->config_table[i].
6636 cam_entry.lsb_mac_addr);
0ddf477b 6637 i++;
34f80b04 6638 }
8d9c5f34 6639 old = config->hdr.length;
34f80b04
EG
6640 if (old > i) {
6641 for (; i < old; i++) {
6642 if (CAM_IS_INVALID(config->
6643 config_table[i])) {
af246401 6644 /* already invalidated */
34f80b04
EG
6645 break;
6646 }
6647 /* invalidate */
6648 CAM_INVALIDATE(config->
6649 config_table[i]);
6650 }
6651 }
6652
6653 if (CHIP_REV_IS_SLOW(bp))
6654 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6655 else
6656 offset = BNX2X_MAX_MULTICAST*(1 + port);
6657
8d9c5f34 6658 config->hdr.length = i;
34f80b04 6659 config->hdr.offset = offset;
8d9c5f34 6660 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6661 config->hdr.reserved1 = 0;
6662
e665bfda
MC
6663 bp->set_mac_pending++;
6664 smp_wmb();
6665
34f80b04
EG
6666 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6667 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6668 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6669 0);
6670 } else { /* E1H */
6671 /* Accept one or more multicasts */
22bedad3 6672 struct netdev_hw_addr *ha;
34f80b04
EG
6673 u32 mc_filter[MC_HASH_SIZE];
6674 u32 crc, bit, regidx;
6675 int i;
6676
6677 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6678
22bedad3 6679 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 6680 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 6681 ha->addr);
34f80b04 6682
22bedad3 6683 crc = crc32c_le(0, ha->addr, ETH_ALEN);
34f80b04
EG
6684 bit = (crc >> 24) & 0xff;
6685 regidx = bit >> 5;
6686 bit &= 0x1f;
6687 mc_filter[regidx] |= (1 << bit);
6688 }
6689
6690 for (i = 0; i < MC_HASH_SIZE; i++)
6691 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6692 mc_filter[i]);
6693 }
6694 }
6695
6696 bp->rx_mode = rx_mode;
6697 bnx2x_set_storm_rx_mode(bp);
6698}
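
/*
 * Sketch of the E1H multicast hashing above: bits 31-24 of the crc32c
 * of the MAC address select one of 256 filter bits, stored across
 * MC_HASH_SIZE 32-bit registers (regidx = bit >> 5, bit &= 0x1f):
 */
static void example_mc_hash_set(u32 *mc_filter, u32 crc)
{
	u32 bit = (crc >> 24) & 0xff;

	mc_filter[bit >> 5] |= 1u << (bit & 0x1f);
}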
6699
a2fbb9ea 6700
c18487ee 6701/* called with rtnl_lock */
01cd4528
EG
6702static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6703 int devad, u16 addr)
a2fbb9ea 6704{
01cd4528
EG
6705 struct bnx2x *bp = netdev_priv(netdev);
6706 u16 value;
6707 int rc;
a2fbb9ea 6708
01cd4528
EG
6709 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6710 prtad, devad, addr);
a2fbb9ea 6711
01cd4528
EG
6712 /* The HW expects different devad if CL22 is used */
6713 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 6714
01cd4528 6715 bnx2x_acquire_phy_lock(bp);
e10bc84d 6716 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
6717 bnx2x_release_phy_lock(bp);
6718 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 6719
01cd4528
EG
6720 if (!rc)
6721 rc = value;
6722 return rc;
6723}
a2fbb9ea 6724
01cd4528
EG
6725/* called with rtnl_lock */
6726static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6727 u16 addr, u16 value)
6728{
6729 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
6730 int rc;
6731
6732 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6733 " value 0x%x\n", prtad, devad, addr, value);
6734
01cd4528
EG
6735 /* The HW expects different devad if CL22 is used */
6736 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 6737
01cd4528 6738 bnx2x_acquire_phy_lock(bp);
e10bc84d 6739 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
6740 bnx2x_release_phy_lock(bp);
6741 return rc;
6742}
c18487ee 6743
01cd4528
EG
6744/* called with rtnl_lock */
6745static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6746{
6747 struct bnx2x *bp = netdev_priv(dev);
6748 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 6749
01cd4528
EG
6750 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
6751 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 6752
01cd4528
EG
6753 if (!netif_running(dev))
6754 return -EAGAIN;
6755
6756 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
6757}
6758
257ddbda 6759#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
6760static void poll_bnx2x(struct net_device *dev)
6761{
6762 struct bnx2x *bp = netdev_priv(dev);
6763
6764 disable_irq(bp->pdev->irq);
6765 bnx2x_interrupt(bp->pdev->irq, dev);
6766 enable_irq(bp->pdev->irq);
6767}
6768#endif
6769
c64213cd
SH
6770static const struct net_device_ops bnx2x_netdev_ops = {
6771 .ndo_open = bnx2x_open,
6772 .ndo_stop = bnx2x_close,
6773 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 6774 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
6775 .ndo_set_mac_address = bnx2x_change_mac_addr,
6776 .ndo_validate_addr = eth_validate_addr,
6777 .ndo_do_ioctl = bnx2x_ioctl,
6778 .ndo_change_mtu = bnx2x_change_mtu,
6779 .ndo_tx_timeout = bnx2x_tx_timeout,
6780#ifdef BCM_VLAN
6781 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
6782#endif
257ddbda 6783#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
6784 .ndo_poll_controller = poll_bnx2x,
6785#endif
6786};
6787
34f80b04
EG
6788static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
6789 struct net_device *dev)
a2fbb9ea
ET
6790{
6791 struct bnx2x *bp;
6792 int rc;
6793
6794 SET_NETDEV_DEV(dev, &pdev->dev);
6795 bp = netdev_priv(dev);
6796
34f80b04
EG
6797 bp->dev = dev;
6798 bp->pdev = pdev;
a2fbb9ea 6799 bp->flags = 0;
34f80b04 6800 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
6801
6802 rc = pci_enable_device(pdev);
6803 if (rc) {
cdaa7cb8
VZ
6804 dev_err(&bp->pdev->dev,
6805 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
6806 goto err_out;
6807 }
6808
6809 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
6810 dev_err(&bp->pdev->dev,
6811 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
6812 rc = -ENODEV;
6813 goto err_out_disable;
6814 }
6815
6816 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
6817 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
6818 " base address, aborting\n");
a2fbb9ea
ET
6819 rc = -ENODEV;
6820 goto err_out_disable;
6821 }
6822
34f80b04
EG
6823 if (atomic_read(&pdev->enable_cnt) == 1) {
6824 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6825 if (rc) {
cdaa7cb8
VZ
6826 dev_err(&bp->pdev->dev,
6827 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
6828 goto err_out_disable;
6829 }
a2fbb9ea 6830
34f80b04
EG
6831 pci_set_master(pdev);
6832 pci_save_state(pdev);
6833 }
a2fbb9ea
ET
6834
6835 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6836 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
6837 dev_err(&bp->pdev->dev,
6838 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
6839 rc = -EIO;
6840 goto err_out_release;
6841 }
6842
6843 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
6844 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
6845 dev_err(&bp->pdev->dev,
6846 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
6847 rc = -EIO;
6848 goto err_out_release;
6849 }
6850
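	/* Probe DMA masks: prefer 64-bit addressing (DAC); otherwise fall
	 * back to 32-bit, and bail out only if even that is unsupported. */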
1a983142 6851 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 6852 bp->flags |= USING_DAC_FLAG;
1a983142 6853 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
6854 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
6855 " failed, aborting\n");
a2fbb9ea
ET
6856 rc = -EIO;
6857 goto err_out_release;
6858 }
6859
1a983142 6860 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
6861 dev_err(&bp->pdev->dev,
6862 "System does not support DMA, aborting\n");
a2fbb9ea
ET
6863 rc = -EIO;
6864 goto err_out_release;
6865 }
6866
34f80b04
EG
6867 dev->mem_start = pci_resource_start(pdev, 0);
6868 dev->base_addr = dev->mem_start;
6869 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
6870
6871 dev->irq = pdev->irq;
6872
275f165f 6873 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 6874 if (!bp->regview) {
cdaa7cb8
VZ
6875 dev_err(&bp->pdev->dev,
6876 "Cannot map register space, aborting\n");
a2fbb9ea
ET
6877 rc = -ENOMEM;
6878 goto err_out_release;
6879 }
6880
34f80b04
EG
6881 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
6882 min_t(u64, BNX2X_DB_SIZE,
6883 pci_resource_len(pdev, 2)));
a2fbb9ea 6884 if (!bp->doorbells) {
cdaa7cb8
VZ
6885 dev_err(&bp->pdev->dev,
6886 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
6887 rc = -ENOMEM;
6888 goto err_out_unmap;
6889 }
6890
6891 bnx2x_set_power_state(bp, PCI_D0);
6892
34f80b04
EG
6893 /* clean indirect addresses */
6894 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
6895 PCICFG_VENDOR_ID_OFFSET);
6896 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
6897 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
6898 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
6899 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 6900
72fd0718
VZ
6901 /* Reset the load counter */
6902 bnx2x_clear_load_cnt(bp);
6903
34f80b04 6904 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 6905
c64213cd 6906 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 6907 bnx2x_set_ethtool_ops(dev);
34f80b04
EG
6908 dev->features |= NETIF_F_SG;
6909 dev->features |= NETIF_F_HW_CSUM;
6910 if (bp->flags & USING_DAC_FLAG)
6911 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
6912 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6913 dev->features |= NETIF_F_TSO6;
34f80b04
EG
6914#ifdef BCM_VLAN
6915 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 6916 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
6917
6918 dev->vlan_features |= NETIF_F_SG;
6919 dev->vlan_features |= NETIF_F_HW_CSUM;
6920 if (bp->flags & USING_DAC_FLAG)
6921 dev->vlan_features |= NETIF_F_HIGHDMA;
6922 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6923 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 6924#endif
a2fbb9ea 6925
01cd4528
EG
6926 /* bnx2x_get_port_hwinfo() will set prtad and mmds properly */
6927 bp->mdio.prtad = MDIO_PRTAD_NONE;
6928 bp->mdio.mmds = 0;
6929 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
6930 bp->mdio.dev = dev;
6931 bp->mdio.mdio_read = bnx2x_mdio_read;
6932 bp->mdio.mdio_write = bnx2x_mdio_write;
6933
a2fbb9ea
ET
6934 return 0;
6935
6936err_out_unmap:
6937 if (bp->regview) {
6938 iounmap(bp->regview);
6939 bp->regview = NULL;
6940 }
a2fbb9ea
ET
6941 if (bp->doorbells) {
6942 iounmap(bp->doorbells);
6943 bp->doorbells = NULL;
6944 }
6945
6946err_out_release:
34f80b04
EG
6947 if (atomic_read(&pdev->enable_cnt) == 1)
6948 pci_release_regions(pdev);
a2fbb9ea
ET
6949
6950err_out_disable:
6951 pci_disable_device(pdev);
6952 pci_set_drvdata(pdev, NULL);
6953
6954err_out:
6955 return rc;
6956}
6957
37f9ce62
EG
6958static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
6959 int *width, int *speed)
25047950
ET
6960{
6961 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
6962
37f9ce62 6963 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 6964
37f9ce62
EG
6965 /* returned speed value: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
6966 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 6967}
37f9ce62 6968
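/*
 * Sketch (editor's illustration, assumed field layout): decoding the
 * negotiated link width/speed from a PCIe Link Status-style value. The
 * real PCICFG_LINK_WIDTH/PCICFG_LINK_SPEED masks live in the register
 * file; the standard Link Status layout (speed in bits 3:0, width in
 * bits 9:4) is assumed here so the example stands alone.
 */
#include <stdint.h>
#include <stdio.h>

#define LINK_SPEED_MASK  0x000f		/* 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
#define LINK_WIDTH_MASK  0x03f0
#define LINK_WIDTH_SHIFT 4

int main(void)
{
	uint32_t val = 0x0082;		/* example encoding: x8 at 5GHz */
	int width = (val & LINK_WIDTH_MASK) >> LINK_WIDTH_SHIFT;
	int speed = val & LINK_SPEED_MASK;

	printf("PCI-E x%d %s\n", width, speed == 2 ? "5GHz (Gen2)" : "2.5GHz");
	return 0;
}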
6891dd25 6969static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 6970{
37f9ce62 6971 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
6972 struct bnx2x_fw_file_hdr *fw_hdr;
6973 struct bnx2x_fw_file_section *sections;
94a78b79 6974 u32 offset, len, num_ops;
37f9ce62 6975 u16 *ops_offsets;
94a78b79 6976 int i;
37f9ce62 6977 const u8 *fw_ver;
94a78b79
VZ
6978
6979 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
6980 return -EINVAL;
6981
6982 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
6983 sections = (struct bnx2x_fw_file_section *)fw_hdr;
6984
6985 /* Make sure none of the offsets and sizes make us read beyond
6986 * the end of the firmware data */
6987 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
6988 offset = be32_to_cpu(sections[i].offset);
6989 len = be32_to_cpu(sections[i].len);
6990 if (offset + len > firmware->size) {
cdaa7cb8
VZ
6991 dev_err(&bp->pdev->dev,
6992 "Section %d length is out of bounds\n", i);
94a78b79
VZ
6993 return -EINVAL;
6994 }
6995 }
6996
6997 /* Likewise for the init_ops offsets */
6998 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
6999 ops_offsets = (u16 *)(firmware->data + offset);
7000 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7001
7002 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7003 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
7004 dev_err(&bp->pdev->dev,
7005 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
7006 return -EINVAL;
7007 }
7008 }
7009
7010 /* Check FW version */
7011 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7012 fw_ver = firmware->data + offset;
7013 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7014 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7015 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7016 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
7017 dev_err(&bp->pdev->dev,
7018 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
7019 fw_ver[0], fw_ver[1], fw_ver[2],
7020 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7021 BCM_5710_FW_MINOR_VERSION,
7022 BCM_5710_FW_REVISION_VERSION,
7023 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 7024 return -EINVAL;
94a78b79
VZ
7025 }
7026
7027 return 0;
7028}
7029
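/*
 * Sketch (editor's illustration): the shape of the section bounds check
 * above, as a standalone helper. Note the overflow-safe form: a literal
 * "offset + len > size" can wrap in 32-bit arithmetic, so the check is
 * split in two. The helper name is illustrative, not driver API.
 */
#include <stdbool.h>
#include <stdint.h>

static bool section_in_bounds(uint32_t offset, uint32_t len, uint32_t size)
{
	/* equivalent to offset + len <= size, but immune to wraparound */
	return offset <= size && len <= size - offset;
}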
ab6ad5a4 7030static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7031{
ab6ad5a4
EG
7032 const __be32 *source = (const __be32 *)_source;
7033 u32 *target = (u32 *)_target;
94a78b79 7034 u32 i;
94a78b79
VZ
7035
7036 for (i = 0; i < n/4; i++)
7037 target[i] = be32_to_cpu(source[i]);
7038}
7039
7040/*
7041 * The ops array is stored in the following format:
7042 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7043 */
ab6ad5a4 7044static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 7045{
ab6ad5a4
EG
7046 const __be32 *source = (const __be32 *)_source;
7047 struct raw_op *target = (struct raw_op *)_target;
94a78b79 7048 u32 i, j, tmp;
94a78b79 7049
ab6ad5a4 7050 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
7051 tmp = be32_to_cpu(source[j]);
7052 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
7053 target[i].offset = tmp & 0xffffff;
7054 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
7055 }
7056}
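/*
 * Sketch (editor's illustration): one 8-byte record from the ops array,
 * packed and unpacked the way bnx2x_prep_ops() does it. The values are
 * made up; htonl/ntohl stand in for cpu_to_be32/be32_to_cpu.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* record on the wire: {op=0x02, offset=0x123456}, then a data word */
	uint32_t wire[2] = { htonl(0x02123456u), htonl(0xdeadbeefu) };

	uint32_t tmp    = ntohl(wire[0]);
	uint8_t  op     = (tmp >> 24) & 0xff;	/* top byte */
	uint32_t offset = tmp & 0xffffff;	/* low 24 bits */
	uint32_t data   = ntohl(wire[1]);

	printf("op 0x%02x offset 0x%06x data 0x%08x\n",
	       (unsigned)op, (unsigned)offset, (unsigned)data);
	return 0;
}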
ab6ad5a4
EG
7057
7058static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7059{
ab6ad5a4
EG
7060 const __be16 *source = (const __be16 *)_source;
7061 u16 *target = (u16 *)_target;
94a78b79 7062 u32 i;
94a78b79
VZ
7063
7064 for (i = 0; i < n/2; i++)
7065 target[i] = be16_to_cpu(source[i]);
7066}
7067
7995c64e
JP
7068#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7069do { \
7070 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7071 bp->arr = kmalloc(len, GFP_KERNEL); \
7072 if (!bp->arr) { \
7073 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7074 goto lbl; \
7075 } \
7076 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7077 (u8 *)bp->arr, len); \
7078} while (0)
94a78b79 7079
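/*
 * Note: BNX2X_ALLOC_AND_SET() deliberately leans on its call site -- it
 * expects `bp` and `fw_hdr` locals and an error label to be in scope,
 * which is why it is only ever expanded inside bnx2x_init_firmware() below.
 */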
6891dd25 7080int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 7081{
45229b42 7082 const char *fw_file_name;
94a78b79 7083 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 7084 int rc;
94a78b79 7085
94a78b79 7086 if (CHIP_IS_E1(bp))
45229b42 7087 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 7088 else if (CHIP_IS_E1H(bp))
45229b42 7089 fw_file_name = FW_FILE_NAME_E1H;
cdaa7cb8 7090 else {
6891dd25 7091 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
7092 return -EINVAL;
7093 }
94a78b79 7094
6891dd25 7095 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 7096
6891dd25 7097 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 7098 if (rc) {
6891dd25 7099 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
7100 goto request_firmware_exit;
7101 }
7102
7103 rc = bnx2x_check_firmware(bp);
7104 if (rc) {
6891dd25 7105 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
7106 goto request_firmware_exit;
7107 }
7108
7109 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7110
7111 /* Initialize the pointers to the init arrays */
7112 /* Blob */
7113 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7114
7115 /* Opcodes */
7116 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7117
7118 /* Offsets */
ab6ad5a4
EG
7119 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7120 be16_to_cpu_n);
94a78b79
VZ
7121
7122 /* STORMs firmware */
573f2035
EG
7123 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7124 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7125 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7126 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7127 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7128 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7129 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7130 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7131 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7132 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7133 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7134 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7135 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7136 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7137 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7138 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
7139
7140 return 0;
ab6ad5a4 7141
94a78b79
VZ
7142init_offsets_alloc_err:
7143 kfree(bp->init_ops);
7144init_ops_alloc_err:
7145 kfree(bp->init_data);
7146request_firmware_exit:
7147 release_firmware(bp->firmware);
7148
7149 return rc;
7150}
7151
7152
a2fbb9ea
ET
7153static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7154 const struct pci_device_id *ent)
7155{
a2fbb9ea
ET
7156 struct net_device *dev = NULL;
7157 struct bnx2x *bp;
37f9ce62 7158 int pcie_width, pcie_speed;
25047950 7159 int rc;
a2fbb9ea 7160
a2fbb9ea 7161 /* dev is zeroed by alloc_etherdev_mq() */
555f6c78 7162 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04 7163 if (!dev) {
cdaa7cb8 7164 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 7165 return -ENOMEM;
34f80b04 7166 }
a2fbb9ea 7167
a2fbb9ea 7168 bp = netdev_priv(dev);
7995c64e 7169 bp->msg_enable = debug;
a2fbb9ea 7170
df4770de
EG
7171 pci_set_drvdata(pdev, dev);
7172
34f80b04 7173 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
7174 if (rc < 0) {
7175 free_netdev(dev);
7176 return rc;
7177 }
7178
34f80b04 7179 rc = bnx2x_init_bp(bp);
693fc0d1
EG
7180 if (rc)
7181 goto init_one_exit;
7182
7183 rc = register_netdev(dev);
34f80b04 7184 if (rc) {
693fc0d1 7185 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
7186 goto init_one_exit;
7187 }
7188
37f9ce62 7189 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
cdaa7cb8
VZ
7190 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7191 " IRQ %d, ", board_info[ent->driver_data].name,
7192 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7193 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7194 dev->base_addr, bp->pdev->irq);
7195 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 7196
a2fbb9ea 7197 return 0;
34f80b04
EG
7198
7199init_one_exit:
7200 if (bp->regview)
7201 iounmap(bp->regview);
7202
7203 if (bp->doorbells)
7204 iounmap(bp->doorbells);
7205
7206 free_netdev(dev);
7207
7208 if (atomic_read(&pdev->enable_cnt) == 1)
7209 pci_release_regions(pdev);
7210
7211 pci_disable_device(pdev);
7212 pci_set_drvdata(pdev, NULL);
7213
7214 return rc;
a2fbb9ea
ET
7215}
7216
7217static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7218{
7219 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
7220 struct bnx2x *bp;
7221
7222 if (!dev) {
cdaa7cb8 7223 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
7224 return;
7225 }
228241eb 7226 bp = netdev_priv(dev);
a2fbb9ea 7227
a2fbb9ea
ET
7228 unregister_netdev(dev);
7229
72fd0718
VZ
7230 /* Make sure the reset task is neither pending nor running before continuing */
7231 cancel_delayed_work_sync(&bp->reset_task);
7232
a2fbb9ea
ET
7233 if (bp->regview)
7234 iounmap(bp->regview);
7235
7236 if (bp->doorbells)
7237 iounmap(bp->doorbells);
7238
7239 free_netdev(dev);
34f80b04
EG
7240
7241 if (atomic_read(&pdev->enable_cnt) == 1)
7242 pci_release_regions(pdev);
7243
a2fbb9ea
ET
7244 pci_disable_device(pdev);
7245 pci_set_drvdata(pdev, NULL);
7246}
7247
f8ef6e44
YG
7248static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7249{
7250 int i;
7251
7252 bp->state = BNX2X_STATE_ERROR;
7253
7254 bp->rx_mode = BNX2X_RX_MODE_NONE;
7255
7256 bnx2x_netif_stop(bp, 0);
c89af1a3 7257 netif_carrier_off(bp->dev);
f8ef6e44
YG
7258
7259 del_timer_sync(&bp->timer);
7260 bp->stats_state = STATS_STATE_DISABLED;
7261 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7262
7263 /* Release IRQs */
6cbe5065 7264 bnx2x_free_irq(bp, false);
f8ef6e44
YG
7265
7266 if (CHIP_IS_E1(bp)) {
7267 struct mac_configuration_cmd *config =
7268 bnx2x_sp(bp, mcast_config);
7269
8d9c5f34 7270 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
7271 CAM_INVALIDATE(config->config_table[i]);
7272 }
7273
7274 /* Free SKBs, SGEs, TPA pool and driver internals */
7275 bnx2x_free_skbs(bp);
54b9ddaa 7276 for_each_queue(bp, i)
f8ef6e44 7277 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 7278 for_each_queue(bp, i)
7cde1c8b 7279 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
7280 bnx2x_free_mem(bp);
7281
7282 bp->state = BNX2X_STATE_CLOSED;
7283
f8ef6e44
YG
7284 return 0;
7285}
7286
7287static void bnx2x_eeh_recover(struct bnx2x *bp)
7288{
7289 u32 val;
7290
7291 mutex_init(&bp->port.phy_mutex);
7292
7293 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7294 bp->link_params.shmem_base = bp->common.shmem_base;
7295 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7296
7297 if (!bp->common.shmem_base ||
7298 (bp->common.shmem_base < 0xA0000) ||
7299 (bp->common.shmem_base >= 0xC0000)) {
7300 BNX2X_DEV_INFO("MCP not active\n");
7301 bp->flags |= NO_MCP_FLAG;
7302 return;
7303 }
7304
7305 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7306 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7307 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7308 BNX2X_ERR("BAD MCP validity signature\n");
7309
7310 if (!BP_NOMCP(bp)) {
7311 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7312 & DRV_MSG_SEQ_NUMBER_MASK);
7313 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7314 }
7315}
7316
493adb1f
WX
7317/**
7318 * bnx2x_io_error_detected - called when PCI error is detected
7319 * @pdev: Pointer to PCI device
7320 * @state: The current pci connection state
7321 *
7322 * This function is called after a PCI bus error affecting
7323 * this device has been detected.
7324 */
7325static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7326 pci_channel_state_t state)
7327{
7328 struct net_device *dev = pci_get_drvdata(pdev);
7329 struct bnx2x *bp = netdev_priv(dev);
7330
7331 rtnl_lock();
7332
7333 netif_device_detach(dev);
7334
07ce50e4
DN
7335 if (state == pci_channel_io_perm_failure) {
7336 rtnl_unlock();
7337 return PCI_ERS_RESULT_DISCONNECT;
7338 }
7339
493adb1f 7340 if (netif_running(dev))
f8ef6e44 7341 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
7342
7343 pci_disable_device(pdev);
7344
7345 rtnl_unlock();
7346
7347 /* Request a slot reset */
7348 return PCI_ERS_RESULT_NEED_RESET;
7349}
7350
7351/**
7352 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7353 * @pdev: Pointer to PCI device
7354 *
7355 * Restart the card from scratch, as if from a cold-boot.
7356 */
7357static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7358{
7359 struct net_device *dev = pci_get_drvdata(pdev);
7360 struct bnx2x *bp = netdev_priv(dev);
7361
7362 rtnl_lock();
7363
7364 if (pci_enable_device(pdev)) {
7365 dev_err(&pdev->dev,
7366 "Cannot re-enable PCI device after reset\n");
7367 rtnl_unlock();
7368 return PCI_ERS_RESULT_DISCONNECT;
7369 }
7370
7371 pci_set_master(pdev);
7372 pci_restore_state(pdev);
7373
7374 if (netif_running(dev))
7375 bnx2x_set_power_state(bp, PCI_D0);
7376
7377 rtnl_unlock();
7378
7379 return PCI_ERS_RESULT_RECOVERED;
7380}
7381
7382/**
7383 * bnx2x_io_resume - called when traffic can start flowing again
7384 * @pdev: Pointer to PCI device
7385 *
7386 * This callback is called when the error recovery driver tells us that
7387 * it's OK to resume normal operation.
7388 */
7389static void bnx2x_io_resume(struct pci_dev *pdev)
7390{
7391 struct net_device *dev = pci_get_drvdata(pdev);
7392 struct bnx2x *bp = netdev_priv(dev);
7393
72fd0718
VZ
7394 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7395 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7396 return;
7397 }
7398
493adb1f
WX
7399 rtnl_lock();
7400
f8ef6e44
YG
7401 bnx2x_eeh_recover(bp);
7402
493adb1f 7403 if (netif_running(dev))
f8ef6e44 7404 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
7405
7406 netif_device_attach(dev);
7407
7408 rtnl_unlock();
7409}
7410
7411static struct pci_error_handlers bnx2x_err_handler = {
7412 .error_detected = bnx2x_io_error_detected,
356e2385
EG
7413 .slot_reset = bnx2x_io_slot_reset,
7414 .resume = bnx2x_io_resume,
493adb1f
WX
7415};
7416
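/*
 * Recovery sequence the PCI core drives through the table above:
 * error_detected() detaches the net device and unloads the NIC,
 * slot_reset() re-enables the device after the bus reset, and resume()
 * reloads the NIC and re-attaches the net device.
 */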
a2fbb9ea 7417static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
7418 .name = DRV_MODULE_NAME,
7419 .id_table = bnx2x_pci_tbl,
7420 .probe = bnx2x_init_one,
7421 .remove = __devexit_p(bnx2x_remove_one),
7422 .suspend = bnx2x_suspend,
7423 .resume = bnx2x_resume,
7424 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
7425};
7426
7427static int __init bnx2x_init(void)
7428{
dd21ca6d
SG
7429 int ret;
7430
7995c64e 7431 pr_info("%s", version);
938cf541 7432
1cf167f2
EG
7433 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7434 if (bnx2x_wq == NULL) {
7995c64e 7435 pr_err("Cannot create workqueue\n");
1cf167f2
EG
7436 return -ENOMEM;
7437 }
7438
dd21ca6d
SG
7439 ret = pci_register_driver(&bnx2x_pci_driver);
7440 if (ret) {
7995c64e 7441 pr_err("Cannot register driver\n");
dd21ca6d
SG
7442 destroy_workqueue(bnx2x_wq);
7443 }
7444 return ret;
a2fbb9ea
ET
7445}
7446
7447static void __exit bnx2x_cleanup(void)
7448{
7449 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
7450
7451 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
7452}
7453
7454module_init(bnx2x_init);
7455module_exit(bnx2x_cleanup);
7456
993ac7b5
MC
7457#ifdef BCM_CNIC
7458
7459/* count denotes the number of new completions we have seen */
7460static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7461{
7462 struct eth_spe *spe;
7463
7464#ifdef BNX2X_STOP_ON_ERROR
7465 if (unlikely(bp->panic))
7466 return;
7467#endif
7468
7469 spin_lock_bh(&bp->spq_lock);
7470 bp->cnic_spq_pending -= count;
7471
7472 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7473 bp->cnic_spq_pending++) {
7474
7475 if (!bp->cnic_kwq_pending)
7476 break;
7477
7478 spe = bnx2x_sp_get_next(bp);
7479 *spe = *bp->cnic_kwq_cons;
7480
7481 bp->cnic_kwq_pending--;
7482
7483 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7484 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7485
7486 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7487 bp->cnic_kwq_cons = bp->cnic_kwq;
7488 else
7489 bp->cnic_kwq_cons++;
7490 }
7491 bnx2x_sp_prod_update(bp);
7492 spin_unlock_bh(&bp->spq_lock);
7493}
7494
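/*
 * Sketch (editor's illustration): the cursor-advance idiom used by the
 * kwq loops above and below, in isolation. `last` points at the final
 * valid slot, and the cursor wraps back to the base instead of stepping
 * past it. Types are simplified; none of these names are driver API.
 */
struct ring {
	int *base, *last, *cons;	/* array start, final slot, cursor */
};

static int ring_consume(struct ring *r)
{
	int v = *r->cons;

	if (r->cons == r->last)		/* wrap exactly as the driver does */
		r->cons = r->base;
	else
		r->cons++;
	return v;
}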
7495static int bnx2x_cnic_sp_queue(struct net_device *dev,
7496 struct kwqe_16 *kwqes[], u32 count)
7497{
7498 struct bnx2x *bp = netdev_priv(dev);
7499 int i;
7500
7501#ifdef BNX2X_STOP_ON_ERROR
7502 if (unlikely(bp->panic))
7503 return -EIO;
7504#endif
7505
7506 spin_lock_bh(&bp->spq_lock);
7507
7508 for (i = 0; i < count; i++) {
7509 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7510
7511 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7512 break;
7513
7514 *bp->cnic_kwq_prod = *spe;
7515
7516 bp->cnic_kwq_pending++;
7517
7518 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7519 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7520 spe->data.mac_config_addr.hi,
7521 spe->data.mac_config_addr.lo,
7522 bp->cnic_kwq_pending);
7523
7524 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7525 bp->cnic_kwq_prod = bp->cnic_kwq;
7526 else
7527 bp->cnic_kwq_prod++;
7528 }
7529
7530 spin_unlock_bh(&bp->spq_lock);
7531
7532 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7533 bnx2x_cnic_sp_post(bp, 0);
7534
7535 return i;
7536}
7537
7538static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7539{
7540 struct cnic_ops *c_ops;
7541 int rc = 0;
7542
7543 mutex_lock(&bp->cnic_mutex);
7544 c_ops = bp->cnic_ops;
7545 if (c_ops)
7546 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7547 mutex_unlock(&bp->cnic_mutex);
7548
7549 return rc;
7550}
7551
7552static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7553{
7554 struct cnic_ops *c_ops;
7555 int rc = 0;
7556
7557 rcu_read_lock();
7558 c_ops = rcu_dereference(bp->cnic_ops);
7559 if (c_ops)
7560 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7561 rcu_read_unlock();
7562
7563 return rc;
7564}
7565
7566/*
7567 * for commands that have no data
7568 */
9f6c9258 7569int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
7570{
7571 struct cnic_ctl_info ctl = {0};
7572
7573 ctl.cmd = cmd;
7574
7575 return bnx2x_cnic_ctl_send(bp, &ctl);
7576}
7577
7578static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7579{
7580 struct cnic_ctl_info ctl;
7581
7582 /* first we tell CNIC and only then we count this as a completion */
7583 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7584 ctl.data.comp.cid = cid;
7585
7586 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7587 bnx2x_cnic_sp_post(bp, 1);
7588}
7589
7590static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7591{
7592 struct bnx2x *bp = netdev_priv(dev);
7593 int rc = 0;
7594
7595 switch (ctl->cmd) {
7596 case DRV_CTL_CTXTBL_WR_CMD: {
7597 u32 index = ctl->data.io.offset;
7598 dma_addr_t addr = ctl->data.io.dma_addr;
7599
7600 bnx2x_ilt_wr(bp, index, addr);
7601 break;
7602 }
7603
7604 case DRV_CTL_COMPLETION_CMD: {
7605 int count = ctl->data.comp.comp_count;
7606
7607 bnx2x_cnic_sp_post(bp, count);
7608 break;
7609 }
7610
7611 /* rtnl_lock is held. */
7612 case DRV_CTL_START_L2_CMD: {
7613 u32 cli = ctl->data.ring.client_id;
7614
7615 bp->rx_mode_cl_mask |= (1 << cli);
7616 bnx2x_set_storm_rx_mode(bp);
7617 break;
7618 }
7619
7620 /* rtnl_lock is held. */
7621 case DRV_CTL_STOP_L2_CMD: {
7622 u32 cli = ctl->data.ring.client_id;
7623
7624 bp->rx_mode_cl_mask &= ~(1 << cli);
7625 bnx2x_set_storm_rx_mode(bp);
7626 break;
7627 }
7628
7629 default:
7630 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7631 rc = -EINVAL;
7632 }
7633
7634 return rc;
7635}
7636
9f6c9258 7637void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
7638{
7639 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7640
7641 if (bp->flags & USING_MSIX_FLAG) {
7642 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7643 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7644 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7645 } else {
7646 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7647 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7648 }
7649 cp->irq_arr[0].status_blk = bp->cnic_sb;
7650 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7651 cp->irq_arr[1].status_blk = bp->def_status_blk;
7652 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7653
7654 cp->num_irq = 2;
7655}
7656
7657static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7658 void *data)
7659{
7660 struct bnx2x *bp = netdev_priv(dev);
7661 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7662
7663 if (ops == NULL)
7664 return -EINVAL;
7665
7666 if (atomic_read(&bp->intr_sem) != 0)
7667 return -EBUSY;
7668
7669 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7670 if (!bp->cnic_kwq)
7671 return -ENOMEM;
7672
7673 bp->cnic_kwq_cons = bp->cnic_kwq;
7674 bp->cnic_kwq_prod = bp->cnic_kwq;
7675 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7676
7677 bp->cnic_spq_pending = 0;
7678 bp->cnic_kwq_pending = 0;
7679
7680 bp->cnic_data = data;
7681
7682 cp->num_irq = 0;
7683 cp->drv_state = CNIC_DRV_STATE_REGD;
7684
7685 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7686
7687 bnx2x_setup_cnic_irq_info(bp);
7688 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7689 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7690 rcu_assign_pointer(bp->cnic_ops, ops);
7691
7692 return 0;
7693}
7694
7695static int bnx2x_unregister_cnic(struct net_device *dev)
7696{
7697 struct bnx2x *bp = netdev_priv(dev);
7698 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7699
7700 mutex_lock(&bp->cnic_mutex);
7701 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7702 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7703 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7704 }
7705 cp->drv_state = 0;
7706 rcu_assign_pointer(bp->cnic_ops, NULL);
7707 mutex_unlock(&bp->cnic_mutex);
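	/* Readers pick up cnic_ops via rcu_dereference(); wait here for all
	 * pre-existing readers to finish before freeing the work queue. */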
7708 synchronize_rcu();
7709 kfree(bp->cnic_kwq);
7710 bp->cnic_kwq = NULL;
7711
7712 return 0;
7713}
7714
7715struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
7716{
7717 struct bnx2x *bp = netdev_priv(dev);
7718 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7719
7720 cp->drv_owner = THIS_MODULE;
7721 cp->chip_id = CHIP_ID(bp);
7722 cp->pdev = bp->pdev;
7723 cp->io_base = bp->regview;
7724 cp->io_base2 = bp->doorbells;
7725 cp->max_kwqe_pending = 8;
7726 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
7727 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
7728 cp->ctx_tbl_len = CNIC_ILT_LINES;
7729 cp->starting_cid = BCM_CNIC_CID_START;
7730 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
7731 cp->drv_ctl = bnx2x_drv_ctl;
7732 cp->drv_register_cnic = bnx2x_register_cnic;
7733 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
7734
7735 return cp;
7736}
7737EXPORT_SYMBOL(bnx2x_cnic_probe);
7738
7739#endif /* BCM_CNIC */
94a78b79 7740