]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x/bnx2x_main.c
bnx2x: create folder for bnx2x firmware files
[net-next-2.6.git] / drivers / net / bnx2x / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
3359fced 3 * Copyright (c) 2007-2010 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea 51#include <linux/io.h>
45229b42 52#include <linux/stringify.h>
a2fbb9ea 53
b0efbb99 54#define BNX2X_MAIN
a2fbb9ea
ET
55#include "bnx2x.h"
56#include "bnx2x_init.h"
94a78b79 57#include "bnx2x_init_ops.h"
9f6c9258 58#include "bnx2x_cmn.h"
a2fbb9ea 59
a2fbb9ea 60
94a78b79
VZ
61#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h"
63/* FW files */
45229b42
BH
64#define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
560131f3
DK
69#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
94a78b79 71
34f80b04
EG
72/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ)
a2fbb9ea 74
53a10565 75static char version[] __devinitdata =
34f80b04 76 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
24e3fcef 79MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 80MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
81MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
45229b42
BH
83MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
a2fbb9ea 85
555f6c78
EG
86static int multi_mode = 1;
87module_param(multi_mode, int, 0);
ca00392c
EG
88MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))");
90
54b9ddaa
VZ
91static int num_queues;
92module_param(num_queues, int, 0);
93MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)");
555f6c78 95
19680c48 96static int disable_tpa;
19680c48 97module_param(disable_tpa, int, 0);
9898f86d 98MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
99
100static int int_mode;
101module_param(int_mode, int, 0);
cdaa7cb8
VZ
102MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
103 "(1 INT#x; 2 MSI)");
8badd27a 104
a18f5128
EG
105static int dropless_fc;
106module_param(dropless_fc, int, 0);
107MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108
9898f86d 109static int poll;
a2fbb9ea 110module_param(poll, int, 0);
9898f86d 111MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
112
113static int mrrs = -1;
114module_param(mrrs, int, 0);
115MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116
9898f86d 117static int debug;
a2fbb9ea 118module_param(debug, int, 0);
9898f86d
EG
119MODULE_PARM_DESC(debug, " Default debug msglevel");
120
1cf167f2 121static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
122
123enum bnx2x_board_type {
124 BCM57710 = 0,
34f80b04
EG
125 BCM57711 = 1,
126 BCM57711E = 2,
a2fbb9ea
ET
127};
128
34f80b04 129/* indexed by board_type, above */
53a10565 130static struct {
a2fbb9ea
ET
131 char *name;
132} board_info[] __devinitdata = {
34f80b04
EG
133 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
136};
137
34f80b04 138
a3aa1884 139static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
e4ed7113
EG
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
a2fbb9ea
ET
143 { 0 }
144};
145
146MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
152/* used only at init
153 * locking is done by mcp
154 */
573f2035 155void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
156{
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
161}
162
a2fbb9ea
ET
163static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164{
165 u32 val;
166
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
171
172 return val;
173}
a2fbb9ea 174
6c719d00 175const u32 dmae_reg_go_c[] = {
a2fbb9ea
ET
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180};
181
182/* copy command into DMAE command memory and set DMAE command go */
6c719d00 183void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
a2fbb9ea
ET
184{
185 u32 cmd_offset;
186 int i;
187
188 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
189 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
190 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
191
ad8d3948
EG
192 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
193 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
194 }
195 REG_WR(bp, dmae_reg_go_c[idx], 1);
196}
197
ad8d3948
EG
198void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
199 u32 len32)
a2fbb9ea 200{
5ff7b6d4 201 struct dmae_command dmae;
a2fbb9ea 202 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
203 int cnt = 200;
204
205 if (!bp->dmae_ready) {
206 u32 *data = bnx2x_sp(bp, wb_data[0]);
207
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
209 " using indirect\n", dst_addr, len32);
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
211 return;
212 }
213
5ff7b6d4 214 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 215
5ff7b6d4
EG
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 219#ifdef __BIG_ENDIAN
5ff7b6d4 220 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 221#else
5ff7b6d4 222 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 223#endif
5ff7b6d4
EG
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226 dmae.src_addr_lo = U64_LO(dma_addr);
227 dmae.src_addr_hi = U64_HI(dma_addr);
228 dmae.dst_addr_lo = dst_addr >> 2;
229 dmae.dst_addr_hi = 0;
230 dmae.len = len32;
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 234
c3eefaf6 235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
237 "dst_addr [%x:%08x (%08x)]\n"
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
ad8d3948 242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 245
5ff7b6d4
EG
246 mutex_lock(&bp->dmae_mutex);
247
a2fbb9ea
ET
248 *wb_comp = 0;
249
5ff7b6d4 250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
251
252 udelay(5);
ad8d3948
EG
253
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
256
ad8d3948 257 if (!cnt) {
c3eefaf6 258 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
259 break;
260 }
ad8d3948 261 cnt--;
12469401
YG
262 /* adjust delay for emulation/FPGA */
263 if (CHIP_REV_IS_SLOW(bp))
264 msleep(100);
265 else
266 udelay(5);
a2fbb9ea 267 }
ad8d3948
EG
268
269 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
270}
271
c18487ee 272void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 273{
5ff7b6d4 274 struct dmae_command dmae;
a2fbb9ea 275 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
276 int cnt = 200;
277
278 if (!bp->dmae_ready) {
279 u32 *data = bnx2x_sp(bp, wb_data[0]);
280 int i;
281
282 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
283 " using indirect\n", src_addr, len32);
284 for (i = 0; i < len32; i++)
285 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
286 return;
287 }
288
5ff7b6d4 289 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 290
5ff7b6d4
EG
291 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
292 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 294#ifdef __BIG_ENDIAN
5ff7b6d4 295 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 296#else
5ff7b6d4 297 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 298#endif
5ff7b6d4
EG
299 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
301 dmae.src_addr_lo = src_addr >> 2;
302 dmae.src_addr_hi = 0;
303 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
305 dmae.len = len32;
306 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 309
c3eefaf6 310 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
311 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
312 "dst_addr [%x:%08x (%08x)]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
314 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
a2fbb9ea 317
5ff7b6d4
EG
318 mutex_lock(&bp->dmae_mutex);
319
320 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
a2fbb9ea
ET
321 *wb_comp = 0;
322
5ff7b6d4 323 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
324
325 udelay(5);
ad8d3948
EG
326
327 while (*wb_comp != DMAE_COMP_VAL) {
328
ad8d3948 329 if (!cnt) {
c3eefaf6 330 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
331 break;
332 }
ad8d3948 333 cnt--;
12469401
YG
334 /* adjust delay for emulation/FPGA */
335 if (CHIP_REV_IS_SLOW(bp))
336 msleep(100);
337 else
338 udelay(5);
a2fbb9ea 339 }
ad8d3948 340 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
341 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
343
344 mutex_unlock(&bp->dmae_mutex);
345}
346
573f2035
EG
347void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
348 u32 addr, u32 len)
349{
02e3c6cb 350 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
573f2035
EG
351 int offset = 0;
352
02e3c6cb 353 while (len > dmae_wr_max) {
573f2035 354 bnx2x_write_dmae(bp, phys_addr + offset,
02e3c6cb
VZ
355 addr + offset, dmae_wr_max);
356 offset += dmae_wr_max * 4;
357 len -= dmae_wr_max;
573f2035
EG
358 }
359
360 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
361}
362
ad8d3948
EG
363/* used only for slowpath so not inlined */
364static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
365{
366 u32 wb_write[2];
367
368 wb_write[0] = val_hi;
369 wb_write[1] = val_lo;
370 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 371}
a2fbb9ea 372
ad8d3948
EG
373#ifdef USE_WB_RD
374static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
375{
376 u32 wb_data[2];
377
378 REG_RD_DMAE(bp, reg, wb_data, 2);
379
380 return HILO_U64(wb_data[0], wb_data[1]);
381}
382#endif
383
a2fbb9ea
ET
384static int bnx2x_mc_assert(struct bnx2x *bp)
385{
a2fbb9ea 386 char last_idx;
34f80b04
EG
387 int i, rc = 0;
388 u32 row0, row1, row2, row3;
389
390 /* XSTORM */
391 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
392 XSTORM_ASSERT_LIST_INDEX_OFFSET);
393 if (last_idx)
394 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
395
396 /* print the asserts */
397 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
398
399 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
400 XSTORM_ASSERT_LIST_OFFSET(i));
401 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
402 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
403 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
405 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
407
408 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
409 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
410 " 0x%08x 0x%08x 0x%08x\n",
411 i, row3, row2, row1, row0);
412 rc++;
413 } else {
414 break;
415 }
416 }
417
418 /* TSTORM */
419 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
420 TSTORM_ASSERT_LIST_INDEX_OFFSET);
421 if (last_idx)
422 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
423
424 /* print the asserts */
425 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
426
427 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
428 TSTORM_ASSERT_LIST_OFFSET(i));
429 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
430 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
431 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
433 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
435
436 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
437 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
438 " 0x%08x 0x%08x 0x%08x\n",
439 i, row3, row2, row1, row0);
440 rc++;
441 } else {
442 break;
443 }
444 }
445
446 /* CSTORM */
447 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
448 CSTORM_ASSERT_LIST_INDEX_OFFSET);
449 if (last_idx)
450 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
451
452 /* print the asserts */
453 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
454
455 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
456 CSTORM_ASSERT_LIST_OFFSET(i));
457 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
458 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
459 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
461 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
463
464 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
465 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
466 " 0x%08x 0x%08x 0x%08x\n",
467 i, row3, row2, row1, row0);
468 rc++;
469 } else {
470 break;
471 }
472 }
473
474 /* USTORM */
475 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
476 USTORM_ASSERT_LIST_INDEX_OFFSET);
477 if (last_idx)
478 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
479
480 /* print the asserts */
481 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
482
483 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
484 USTORM_ASSERT_LIST_OFFSET(i));
485 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
486 USTORM_ASSERT_LIST_OFFSET(i) + 4);
487 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i) + 8);
489 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 12);
491
492 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
493 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
494 " 0x%08x 0x%08x 0x%08x\n",
495 i, row3, row2, row1, row0);
496 rc++;
497 } else {
498 break;
a2fbb9ea
ET
499 }
500 }
34f80b04 501
a2fbb9ea
ET
502 return rc;
503}
c14423fe 504
a2fbb9ea
ET
505static void bnx2x_fw_dump(struct bnx2x *bp)
506{
cdaa7cb8 507 u32 addr;
a2fbb9ea 508 u32 mark, offset;
4781bfad 509 __be32 data[9];
a2fbb9ea
ET
510 int word;
511
2145a920
VZ
512 if (BP_NOMCP(bp)) {
513 BNX2X_ERR("NO MCP - can not dump\n");
514 return;
515 }
cdaa7cb8
VZ
516
517 addr = bp->common.shmem_base - 0x0800 + 4;
518 mark = REG_RD(bp, addr);
519 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
7995c64e 520 pr_err("begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 521
7995c64e 522 pr_err("");
cdaa7cb8 523 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
a2fbb9ea 524 for (word = 0; word < 8; word++)
cdaa7cb8 525 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 526 data[8] = 0x0;
7995c64e 527 pr_cont("%s", (char *)data);
a2fbb9ea 528 }
cdaa7cb8 529 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
a2fbb9ea 530 for (word = 0; word < 8; word++)
cdaa7cb8 531 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 532 data[8] = 0x0;
7995c64e 533 pr_cont("%s", (char *)data);
a2fbb9ea 534 }
7995c64e 535 pr_err("end of fw dump\n");
a2fbb9ea
ET
536}
537
6c719d00 538void bnx2x_panic_dump(struct bnx2x *bp)
a2fbb9ea
ET
539{
540 int i;
541 u16 j, start, end;
542
66e855f3
YG
543 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
545
a2fbb9ea
ET
546 BNX2X_ERR("begin crash dump -----------------\n");
547
8440d2b6
EG
548 /* Indices */
549 /* Common */
cdaa7cb8
VZ
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n",
8440d2b6
EG
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
555
556 /* Rx */
54b9ddaa 557 for_each_queue(bp, i) {
a2fbb9ea 558 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 559
cdaa7cb8
VZ
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
8440d2b6 563 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
cdaa7cb8
VZ
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
8440d2b6
EG
568 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx),
570 fp->status_blk->u_status_block.status_block_index);
571 }
a2fbb9ea 572
8440d2b6 573 /* Tx */
54b9ddaa 574 for_each_queue(bp, i) {
8440d2b6 575 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 576
cdaa7cb8
VZ
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n",
8440d2b6
EG
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
cdaa7cb8
VZ
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 584 fp->status_blk->c_status_block.status_block_index,
ca00392c 585 fp->tx_db.data.prod);
8440d2b6 586 }
a2fbb9ea 587
8440d2b6
EG
588 /* Rings */
589 /* Rx */
54b9ddaa 590 for_each_queue(bp, i) {
8440d2b6 591 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
592
593 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
594 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 595 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
596 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
597 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
598
c3eefaf6
EG
599 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
600 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
601 }
602
3196a88a
EG
603 start = RX_SGE(fp->rx_sge_prod);
604 end = RX_SGE(fp->last_max_sge);
8440d2b6 605 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
606 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
607 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
608
c3eefaf6
EG
609 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
610 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
611 }
612
a2fbb9ea
ET
613 start = RCQ_BD(fp->rx_comp_cons - 10);
614 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 615 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
616 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
617
c3eefaf6
EG
618 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
619 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
620 }
621 }
622
8440d2b6 623 /* Tx */
54b9ddaa 624 for_each_queue(bp, i) {
8440d2b6
EG
625 struct bnx2x_fastpath *fp = &bp->fp[i];
626
627 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
628 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
629 for (j = start; j != end; j = TX_BD(j + 1)) {
630 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
631
c3eefaf6
EG
632 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
633 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
634 }
635
636 start = TX_BD(fp->tx_bd_cons - 10);
637 end = TX_BD(fp->tx_bd_cons + 254);
638 for (j = start; j != end; j = TX_BD(j + 1)) {
639 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
640
c3eefaf6
EG
641 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
643 }
644 }
a2fbb9ea 645
34f80b04 646 bnx2x_fw_dump(bp);
a2fbb9ea
ET
647 bnx2x_mc_assert(bp);
648 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
649}
650
9f6c9258 651void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 652{
34f80b04 653 int port = BP_PORT(bp);
a2fbb9ea
ET
654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
655 u32 val = REG_RD(bp, addr);
656 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 657 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
658
659 if (msix) {
8badd27a
EG
660 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
661 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
662 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
664 } else if (msi) {
665 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
666 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
669 } else {
670 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 671 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
672 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 674
8badd27a
EG
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676 val, port, addr);
615f8fd9
ET
677
678 REG_WR(bp, addr, val);
679
a2fbb9ea
ET
680 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
681 }
682
8badd27a
EG
683 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
684 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
685
686 REG_WR(bp, addr, val);
37dbbf32
EG
687 /*
688 * Ensure that HC_CONFIG is written before leading/trailing edge config
689 */
690 mmiowb();
691 barrier();
34f80b04
EG
692
693 if (CHIP_IS_E1H(bp)) {
694 /* init leading/trailing edge */
695 if (IS_E1HMF(bp)) {
8badd27a 696 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 697 if (bp->port.pmf)
4acac6a5
EG
698 /* enable nig and gpio3 attention */
699 val |= 0x1100;
34f80b04
EG
700 } else
701 val = 0xffff;
702
703 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
704 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
705 }
37dbbf32
EG
706
707 /* Make sure that interrupts are indeed enabled from here on */
708 mmiowb();
a2fbb9ea
ET
709}
710
615f8fd9 711static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 712{
34f80b04 713 int port = BP_PORT(bp);
a2fbb9ea
ET
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715 u32 val = REG_RD(bp, addr);
716
717 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
718 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
719 HC_CONFIG_0_REG_INT_LINE_EN_0 |
720 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
721
722 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
723 val, port, addr);
724
8badd27a
EG
725 /* flush all outstanding writes */
726 mmiowb();
727
a2fbb9ea
ET
728 REG_WR(bp, addr, val);
729 if (REG_RD(bp, addr) != val)
730 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731}
732
9f6c9258 733void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 734{
a2fbb9ea 735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 736 int i, offset;
a2fbb9ea 737
34f80b04 738 /* disable interrupt handling */
a2fbb9ea 739 atomic_inc(&bp->intr_sem);
e1510706
EG
740 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
741
f8ef6e44
YG
742 if (disable_hw)
743 /* prevent the HW from sending interrupts */
744 bnx2x_int_disable(bp);
a2fbb9ea
ET
745
746 /* make sure all ISRs are done */
747 if (msix) {
8badd27a
EG
748 synchronize_irq(bp->msix_table[0].vector);
749 offset = 1;
37b091ba
MC
750#ifdef BCM_CNIC
751 offset++;
752#endif
a2fbb9ea 753 for_each_queue(bp, i)
8badd27a 754 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
755 } else
756 synchronize_irq(bp->pdev->irq);
757
758 /* make sure sp_task is not running */
1cf167f2
EG
759 cancel_delayed_work(&bp->sp_task);
760 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
761}
762
34f80b04 763/* fast path */
a2fbb9ea
ET
764
765/*
34f80b04 766 * General service functions
a2fbb9ea
ET
767 */
768
72fd0718
VZ
769/* Return true if succeeded to acquire the lock */
770static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
771{
772 u32 lock_status;
773 u32 resource_bit = (1 << resource);
774 int func = BP_FUNC(bp);
775 u32 hw_lock_control_reg;
776
777 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
778
779 /* Validating that the resource is within range */
780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
781 DP(NETIF_MSG_HW,
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
0fdf4d09 784 return false;
72fd0718
VZ
785 }
786
787 if (func <= 5)
788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
789 else
790 hw_lock_control_reg =
791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
792
793 /* Try to acquire the lock */
794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795 lock_status = REG_RD(bp, hw_lock_control_reg);
796 if (lock_status & resource_bit)
797 return true;
798
799 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
800 return false;
801}
802
a2fbb9ea 803
993ac7b5
MC
804#ifdef BCM_CNIC
805static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
806#endif
3196a88a 807
9f6c9258 808void bnx2x_sp_event(struct bnx2x_fastpath *fp,
a2fbb9ea
ET
809 union eth_rx_cqe *rr_cqe)
810{
811 struct bnx2x *bp = fp->bp;
812 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
813 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
814
34f80b04 815 DP(BNX2X_MSG_SP,
a2fbb9ea 816 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 817 fp->index, cid, command, bp->state,
34f80b04 818 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
819
820 bp->spq_left++;
821
0626b899 822 if (fp->index) {
a2fbb9ea
ET
823 switch (command | fp->state) {
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
827 cid);
828 fp->state = BNX2X_FP_STATE_OPEN;
829 break;
830
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
833 cid);
834 fp->state = BNX2X_FP_STATE_HALTED;
835 break;
836
837 default:
34f80b04 838 BNX2X_ERR("unexpected MC reply (%d) "
cdaa7cb8
VZ
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
34f80b04 841 break;
a2fbb9ea 842 }
34f80b04 843 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
844 return;
845 }
c14423fe 846
a2fbb9ea
ET
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
851 break;
852
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED;
857 break;
858
a2fbb9ea 859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
862 break;
863
993ac7b5
MC
864#ifdef BCM_CNIC
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867 bnx2x_cnic_cfc_comp(bp, cid);
868 break;
869#endif
3196a88a 870
a2fbb9ea 871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
e665bfda
MC
874 bp->set_mac_pending--;
875 smp_wmb();
a2fbb9ea
ET
876 break;
877
49d66772 878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
e665bfda
MC
880 bp->set_mac_pending--;
881 smp_wmb();
49d66772
ET
882 break;
883
a2fbb9ea 884 default:
34f80b04 885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 886 command, bp->state);
34f80b04 887 break;
a2fbb9ea 888 }
34f80b04 889 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
890}
891
9f6c9258 892irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
a2fbb9ea 893{
555f6c78 894 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 895 u16 status = bnx2x_ack_int(bp);
34f80b04 896 u16 mask;
ca00392c 897 int i;
a2fbb9ea 898
34f80b04 899 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
900 if (unlikely(status == 0)) {
901 DP(NETIF_MSG_INTR, "not our interrupt!\n");
902 return IRQ_NONE;
903 }
f5372251 904 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 905
34f80b04 906 /* Return here if interrupt is disabled */
a2fbb9ea
ET
907 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
908 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
909 return IRQ_HANDLED;
910 }
911
3196a88a
EG
912#ifdef BNX2X_STOP_ON_ERROR
913 if (unlikely(bp->panic))
914 return IRQ_HANDLED;
915#endif
916
ca00392c
EG
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
918 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 919
ca00392c
EG
920 mask = 0x2 << fp->sb_id;
921 if (status & mask) {
54b9ddaa
VZ
922 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
925 status_block_index);
926 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block.
928 status_block_index);
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
930 status &= ~mask;
931 }
a2fbb9ea
ET
932 }
933
993ac7b5
MC
934#ifdef BCM_CNIC
935 mask = 0x2 << CNIC_SB_ID(bp);
936 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL;
938
939 rcu_read_lock();
940 c_ops = rcu_dereference(bp->cnic_ops);
941 if (c_ops)
942 c_ops->cnic_handler(bp->cnic_data, NULL);
943 rcu_read_unlock();
944
945 status &= ~mask;
946 }
947#endif
a2fbb9ea 948
34f80b04 949 if (unlikely(status & 0x1)) {
1cf167f2 950 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
951
952 status &= ~0x1;
953 if (!status)
954 return IRQ_HANDLED;
955 }
956
cdaa7cb8
VZ
957 if (unlikely(status))
958 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 959 status);
a2fbb9ea 960
c18487ee 961 return IRQ_HANDLED;
a2fbb9ea
ET
962}
963
c18487ee 964/* end of fast path */
a2fbb9ea 965
a2fbb9ea 966
c18487ee
YR
967/* Link */
968
969/*
970 * General service functions
971 */
a2fbb9ea 972
9f6c9258 973int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
974{
975 u32 lock_status;
976 u32 resource_bit = (1 << resource);
4a37fb66
YG
977 int func = BP_FUNC(bp);
978 u32 hw_lock_control_reg;
c18487ee 979 int cnt;
a2fbb9ea 980
c18487ee
YR
981 /* Validating that the resource is within range */
982 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
983 DP(NETIF_MSG_HW,
984 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985 resource, HW_LOCK_MAX_RESOURCE_VALUE);
986 return -EINVAL;
987 }
a2fbb9ea 988
4a37fb66
YG
989 if (func <= 5) {
990 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
991 } else {
992 hw_lock_control_reg =
993 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
994 }
995
c18487ee 996 /* Validating that the resource is not already taken */
4a37fb66 997 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
998 if (lock_status & resource_bit) {
999 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1000 lock_status, resource_bit);
1001 return -EEXIST;
1002 }
a2fbb9ea 1003
46230476
EG
1004 /* Try for 5 second every 5ms */
1005 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1006 /* Try to acquire the lock */
4a37fb66
YG
1007 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1009 if (lock_status & resource_bit)
1010 return 0;
a2fbb9ea 1011
c18487ee 1012 msleep(5);
a2fbb9ea 1013 }
c18487ee
YR
1014 DP(NETIF_MSG_HW, "Timeout\n");
1015 return -EAGAIN;
1016}
a2fbb9ea 1017
9f6c9258 1018int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1019{
1020 u32 lock_status;
1021 u32 resource_bit = (1 << resource);
4a37fb66
YG
1022 int func = BP_FUNC(bp);
1023 u32 hw_lock_control_reg;
a2fbb9ea 1024
72fd0718
VZ
1025 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1026
c18487ee
YR
1027 /* Validating that the resource is within range */
1028 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1029 DP(NETIF_MSG_HW,
1030 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1032 return -EINVAL;
1033 }
1034
4a37fb66
YG
1035 if (func <= 5) {
1036 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1037 } else {
1038 hw_lock_control_reg =
1039 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1040 }
1041
c18487ee 1042 /* Validating that the resource is currently taken */
4a37fb66 1043 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1044 if (!(lock_status & resource_bit)) {
1045 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1046 lock_status, resource_bit);
1047 return -EFAULT;
a2fbb9ea
ET
1048 }
1049
9f6c9258
DK
1050 REG_WR(bp, hw_lock_control_reg, resource_bit);
1051 return 0;
c18487ee 1052}
a2fbb9ea 1053
9f6c9258 1054
4acac6a5
EG
1055int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1056{
1057 /* The GPIO should be swapped if swap register is set and active */
1058 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060 int gpio_shift = gpio_num +
1061 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062 u32 gpio_mask = (1 << gpio_shift);
1063 u32 gpio_reg;
1064 int value;
1065
1066 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1068 return -EINVAL;
1069 }
1070
1071 /* read GPIO value */
1072 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1073
1074 /* get the requested pin value */
1075 if ((gpio_reg & gpio_mask) == gpio_mask)
1076 value = 1;
1077 else
1078 value = 0;
1079
1080 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1081
1082 return value;
1083}
1084
17de50b7 1085int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1086{
1087 /* The GPIO should be swapped if swap register is set and active */
1088 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1089 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1090 int gpio_shift = gpio_num +
1091 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1092 u32 gpio_mask = (1 << gpio_shift);
1093 u32 gpio_reg;
a2fbb9ea 1094
c18487ee
YR
1095 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1096 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1097 return -EINVAL;
1098 }
a2fbb9ea 1099
4a37fb66 1100 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1101 /* read GPIO and mask except the float bits */
1102 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1103
c18487ee
YR
1104 switch (mode) {
1105 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1106 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1107 gpio_num, gpio_shift);
1108 /* clear FLOAT and set CLR */
1109 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1110 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1111 break;
a2fbb9ea 1112
c18487ee
YR
1113 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1114 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1115 gpio_num, gpio_shift);
1116 /* clear FLOAT and set SET */
1117 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1118 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1119 break;
a2fbb9ea 1120
17de50b7 1121 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1122 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1123 gpio_num, gpio_shift);
1124 /* set FLOAT */
1125 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1126 break;
a2fbb9ea 1127
c18487ee
YR
1128 default:
1129 break;
a2fbb9ea
ET
1130 }
1131
c18487ee 1132 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1133 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1134
c18487ee 1135 return 0;
a2fbb9ea
ET
1136}
1137
4acac6a5
EG
1138int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1139{
1140 /* The GPIO should be swapped if swap register is set and active */
1141 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1142 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1143 int gpio_shift = gpio_num +
1144 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1145 u32 gpio_mask = (1 << gpio_shift);
1146 u32 gpio_reg;
1147
1148 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1149 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1150 return -EINVAL;
1151 }
1152
1153 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1154 /* read GPIO int */
1155 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1156
1157 switch (mode) {
1158 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1159 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1160 "output low\n", gpio_num, gpio_shift);
1161 /* clear SET and set CLR */
1162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1164 break;
1165
1166 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1167 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1168 "output high\n", gpio_num, gpio_shift);
1169 /* clear CLR and set SET */
1170 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1172 break;
1173
1174 default:
1175 break;
1176 }
1177
1178 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1179 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1180
1181 return 0;
1182}
1183
c18487ee 1184static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1185{
c18487ee
YR
1186 u32 spio_mask = (1 << spio_num);
1187 u32 spio_reg;
a2fbb9ea 1188
c18487ee
YR
1189 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1190 (spio_num > MISC_REGISTERS_SPIO_7)) {
1191 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1192 return -EINVAL;
a2fbb9ea
ET
1193 }
1194
4a37fb66 1195 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1196 /* read SPIO and mask except the float bits */
1197 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1198
c18487ee 1199 switch (mode) {
6378c025 1200 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1201 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1202 /* clear FLOAT and set CLR */
1203 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1204 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1205 break;
a2fbb9ea 1206
6378c025 1207 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1208 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1209 /* clear FLOAT and set SET */
1210 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1211 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1212 break;
a2fbb9ea 1213
c18487ee
YR
1214 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1215 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1216 /* set FLOAT */
1217 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1218 break;
a2fbb9ea 1219
c18487ee
YR
1220 default:
1221 break;
a2fbb9ea
ET
1222 }
1223
c18487ee 1224 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1225 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1226
a2fbb9ea
ET
1227 return 0;
1228}
1229
a22f0788
YR
1230int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1231{
1232 u32 sel_phy_idx = 0;
1233 if (bp->link_vars.link_up) {
1234 sel_phy_idx = EXT_PHY1;
1235 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1236 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1237 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1238 sel_phy_idx = EXT_PHY2;
1239 } else {
1240
1241 switch (bnx2x_phy_selection(&bp->link_params)) {
1242 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1243 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1244 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1245 sel_phy_idx = EXT_PHY1;
1246 break;
1247 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1248 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1249 sel_phy_idx = EXT_PHY2;
1250 break;
1251 }
1252 }
1253 /*
1254 * The selected actived PHY is always after swapping (in case PHY
1255 * swapping is enabled). So when swapping is enabled, we need to reverse
1256 * the configuration
1257 */
1258
1259 if (bp->link_params.multi_phy_config &
1260 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1261 if (sel_phy_idx == EXT_PHY1)
1262 sel_phy_idx = EXT_PHY2;
1263 else if (sel_phy_idx == EXT_PHY2)
1264 sel_phy_idx = EXT_PHY1;
1265 }
1266 return LINK_CONFIG_IDX(sel_phy_idx);
1267}
1268
9f6c9258 1269void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1270{
a22f0788 1271 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1272 switch (bp->link_vars.ieee_fc &
1273 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1274 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1275 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1276 ADVERTISED_Pause);
1277 break;
356e2385 1278
c18487ee 1279 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1280 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1281 ADVERTISED_Pause);
1282 break;
356e2385 1283
c18487ee 1284 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1285 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1286 break;
356e2385 1287
c18487ee 1288 default:
a22f0788 1289 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1290 ADVERTISED_Pause);
1291 break;
1292 }
1293}
f1410647 1294
c18487ee 1295
9f6c9258 1296u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1297{
19680c48
EG
1298 if (!BP_NOMCP(bp)) {
1299 u8 rc;
a22f0788
YR
1300 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1301 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1302 /* Initialize link parameters structure variables */
8c99e7b0
YR
1303 /* It is recommended to turn off RX FC for jumbo frames
1304 for better performance */
0c593270 1305 if (bp->dev->mtu > 5000)
c0700f90 1306 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1307 else
c0700f90 1308 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1309
4a37fb66 1310 bnx2x_acquire_phy_lock(bp);
b5bf9068 1311
a22f0788 1312 if (load_mode == LOAD_DIAG) {
de6eae1f 1313 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1314 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1315 }
b5bf9068 1316
19680c48 1317 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1318
4a37fb66 1319 bnx2x_release_phy_lock(bp);
a2fbb9ea 1320
3c96c68b
EG
1321 bnx2x_calc_fc_adv(bp);
1322
b5bf9068
EG
1323 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1324 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1325 bnx2x_link_report(bp);
b5bf9068 1326 }
a22f0788 1327 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1328 return rc;
1329 }
f5372251 1330 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1331 return -EINVAL;
a2fbb9ea
ET
1332}
1333
9f6c9258 1334void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1335{
19680c48 1336 if (!BP_NOMCP(bp)) {
4a37fb66 1337 bnx2x_acquire_phy_lock(bp);
54c2fb78 1338 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1339 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1340 bnx2x_release_phy_lock(bp);
a2fbb9ea 1341
19680c48
EG
1342 bnx2x_calc_fc_adv(bp);
1343 } else
f5372251 1344 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1345}
a2fbb9ea 1346
c18487ee
YR
1347static void bnx2x__link_reset(struct bnx2x *bp)
1348{
19680c48 1349 if (!BP_NOMCP(bp)) {
4a37fb66 1350 bnx2x_acquire_phy_lock(bp);
589abe3a 1351 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1352 bnx2x_release_phy_lock(bp);
19680c48 1353 } else
f5372251 1354 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1355}
a2fbb9ea 1356
a22f0788 1357u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1358{
2145a920 1359 u8 rc = 0;
a2fbb9ea 1360
2145a920
VZ
1361 if (!BP_NOMCP(bp)) {
1362 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1363 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1364 is_serdes);
2145a920
VZ
1365 bnx2x_release_phy_lock(bp);
1366 } else
1367 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1368
c18487ee
YR
1369 return rc;
1370}
a2fbb9ea 1371
8a1c38d1 1372static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1373{
8a1c38d1
EG
1374 u32 r_param = bp->link_vars.line_speed / 8;
1375 u32 fair_periodic_timeout_usec;
1376 u32 t_fair;
34f80b04 1377
8a1c38d1
EG
1378 memset(&(bp->cmng.rs_vars), 0,
1379 sizeof(struct rate_shaping_vars_per_port));
1380 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1381
8a1c38d1
EG
1382 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1383 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1384
8a1c38d1
EG
1385 /* this is the threshold below which no timer arming will occur
1386 1.25 coefficient is for the threshold to be a little bigger
1387 than the real time, to compensate for timer in-accuracy */
1388 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1389 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1390
8a1c38d1
EG
1391 /* resolution of fairness timer */
1392 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1393 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1394 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1395
8a1c38d1
EG
1396 /* this is the threshold below which we won't arm the timer anymore */
1397 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1398
8a1c38d1
EG
1399 /* we multiply by 1e3/8 to get bytes/msec.
1400 We don't want the credits to pass a credit
1401 of the t_fair*FAIR_MEM (algorithm resolution) */
1402 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1403 /* since each tick is 4 usec */
1404 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1405}
1406
2691d51d
EG
1407/* Calculates the sum of vn_min_rates.
1408 It's needed for further normalizing of the min_rates.
1409 Returns:
1410 sum of vn_min_rates.
1411 or
1412 0 - if all the min_rates are 0.
1413 In the later case fainess algorithm should be deactivated.
1414 If not all min_rates are zero then those that are zeroes will be set to 1.
1415 */
1416static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1417{
1418 int all_zero = 1;
1419 int port = BP_PORT(bp);
1420 int vn;
1421
1422 bp->vn_weight_sum = 0;
1423 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1424 int func = 2*vn + port;
1425 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1426 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1427 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1428
1429 /* Skip hidden vns */
1430 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1431 continue;
1432
1433 /* If min rate is zero - set it to 1 */
1434 if (!vn_min_rate)
1435 vn_min_rate = DEF_MIN_RATE;
1436 else
1437 all_zero = 0;
1438
1439 bp->vn_weight_sum += vn_min_rate;
1440 }
1441
1442 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1443 if (all_zero) {
1444 bp->cmng.flags.cmng_enables &=
1445 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1446 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1447 " fairness will be disabled\n");
1448 } else
1449 bp->cmng.flags.cmng_enables |=
1450 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1451}
1452
8a1c38d1 1453static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
1454{
1455 struct rate_shaping_vars_per_vn m_rs_vn;
1456 struct fairness_vars_per_vn m_fair_vn;
1457 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1458 u16 vn_min_rate, vn_max_rate;
1459 int i;
1460
1461 /* If function is hidden - set min and max to zeroes */
1462 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1463 vn_min_rate = 0;
1464 vn_max_rate = 0;
1465
1466 } else {
1467 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1468 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
1469 /* If min rate is zero - set it to 1 */
1470 if (!vn_min_rate)
34f80b04
EG
1471 vn_min_rate = DEF_MIN_RATE;
1472 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1473 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1474 }
8a1c38d1 1475 DP(NETIF_MSG_IFUP,
b015e3d1 1476 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1477 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1478
1479 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1480 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1481
1482 /* global vn counter - maximal Mbps for this vn */
1483 m_rs_vn.vn_counter.rate = vn_max_rate;
1484
1485 /* quota - number of bytes transmitted in this period */
1486 m_rs_vn.vn_counter.quota =
1487 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1488
8a1c38d1 1489 if (bp->vn_weight_sum) {
34f80b04
EG
1490 /* credit for each period of the fairness algorithm:
1491 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
1492 vn_weight_sum should not be larger than 10000, thus
1493 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1494 than zero */
34f80b04 1495 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1496 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1497 (8 * bp->vn_weight_sum))),
1498 (bp->cmng.fair_vars.fair_threshold * 2));
1499 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1500 m_fair_vn.vn_credit_delta);
1501 }
1502
34f80b04
EG
1503 /* Store it to internal memory */
1504 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1505 REG_WR(bp, BAR_XSTRORM_INTMEM +
1506 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1507 ((u32 *)(&m_rs_vn))[i]);
1508
1509 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1510 REG_WR(bp, BAR_XSTRORM_INTMEM +
1511 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1512 ((u32 *)(&m_fair_vn))[i]);
1513}
1514
8a1c38d1 1515
c18487ee
YR
1516/* This function is called upon link interrupt */
1517static void bnx2x_link_attn(struct bnx2x *bp)
1518{
d9e8b185 1519 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
1520 /* Make sure that we are synced with the current statistics */
1521 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1522
c18487ee 1523 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 1524
bb2a0f7a
YG
1525 if (bp->link_vars.link_up) {
1526
1c06328c 1527 /* dropless flow control */
a18f5128 1528 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
1529 int port = BP_PORT(bp);
1530 u32 pause_enabled = 0;
1531
1532 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1533 pause_enabled = 1;
1534
1535 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 1536 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
1537 pause_enabled);
1538 }
1539
bb2a0f7a
YG
1540 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1541 struct host_port_stats *pstats;
1542
1543 pstats = bnx2x_sp(bp, port_stats);
1544 /* reset old bmac stats */
1545 memset(&(pstats->mac_stx[0]), 0,
1546 sizeof(struct mac_stx));
1547 }
f34d28ea 1548 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
1549 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1550 }
1551
d9e8b185
VZ
1552 /* indicate link status only if link status actually changed */
1553 if (prev_link_status != bp->link_vars.link_status)
1554 bnx2x_link_report(bp);
34f80b04
EG
1555
1556 if (IS_E1HMF(bp)) {
8a1c38d1 1557 int port = BP_PORT(bp);
34f80b04 1558 int func;
8a1c38d1 1559 int vn;
34f80b04 1560
ab6ad5a4 1561 /* Set the attention towards other drivers on the same port */
34f80b04
EG
1562 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1563 if (vn == BP_E1HVN(bp))
1564 continue;
1565
8a1c38d1 1566 func = ((vn << 1) | port);
34f80b04
EG
1567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1568 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1569 }
34f80b04 1570
8a1c38d1
EG
1571 if (bp->link_vars.link_up) {
1572 int i;
1573
1574 /* Init rate shaping and fairness contexts */
1575 bnx2x_init_port_minmax(bp);
34f80b04 1576
34f80b04 1577 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
1578 bnx2x_init_vn_minmax(bp, 2*vn + port);
1579
1580 /* Store it to internal memory */
1581 for (i = 0;
1582 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1583 REG_WR(bp, BAR_XSTRORM_INTMEM +
1584 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1585 ((u32 *)(&bp->cmng))[i]);
1586 }
34f80b04 1587 }
c18487ee 1588}
a2fbb9ea 1589
9f6c9258 1590void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 1591{
f34d28ea 1592 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 1593 return;
a2fbb9ea 1594
c18487ee 1595 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 1596
bb2a0f7a
YG
1597 if (bp->link_vars.link_up)
1598 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1599 else
1600 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1601
2691d51d
EG
1602 bnx2x_calc_vn_weight_sum(bp);
1603
c18487ee
YR
1604 /* indicate link status */
1605 bnx2x_link_report(bp);
a2fbb9ea 1606}
a2fbb9ea 1607
34f80b04
EG
1608static void bnx2x_pmf_update(struct bnx2x *bp)
1609{
1610 int port = BP_PORT(bp);
1611 u32 val;
1612
1613 bp->port.pmf = 1;
1614 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1615
1616 /* enable nig attention */
1617 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1618 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1619 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
1620
1621 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
1622}
1623
c18487ee 1624/* end of Link */
a2fbb9ea
ET
1625
1626/* slow path */
1627
1628/*
1629 * General service functions
1630 */
1631
2691d51d 1632/* send the MCP a request, block until there is a reply */
a22f0788 1633u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d
EG
1634{
1635 int func = BP_FUNC(bp);
1636 u32 seq = ++bp->fw_seq;
1637 u32 rc = 0;
1638 u32 cnt = 1;
1639 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1640
c4ff7cbf 1641 mutex_lock(&bp->fw_mb_mutex);
a22f0788 1642 SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
2691d51d
EG
1643 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1644 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1645
1646 do {
1647 /* let the FW do it's magic ... */
1648 msleep(delay);
1649
1650 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1651
c4ff7cbf
EG
1652 /* Give the FW up to 5 second (500*10ms) */
1653 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
1654
1655 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1656 cnt*delay, rc, seq);
1657
1658 /* is this a reply to our command? */
1659 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1660 rc &= FW_MSG_CODE_MASK;
1661 else {
1662 /* FW BUG! */
1663 BNX2X_ERR("FW failed to respond!\n");
1664 bnx2x_fw_dump(bp);
1665 rc = 0;
1666 }
c4ff7cbf 1667 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
1668
1669 return rc;
1670}
1671
2691d51d
EG
1672static void bnx2x_e1h_disable(struct bnx2x *bp)
1673{
1674 int port = BP_PORT(bp);
2691d51d
EG
1675
1676 netif_tx_disable(bp->dev);
2691d51d
EG
1677
1678 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1679
2691d51d
EG
1680 netif_carrier_off(bp->dev);
1681}
1682
1683static void bnx2x_e1h_enable(struct bnx2x *bp)
1684{
1685 int port = BP_PORT(bp);
1686
1687 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1688
2691d51d
EG
1689 /* Tx queue should be only reenabled */
1690 netif_tx_wake_all_queues(bp->dev);
1691
061bc702
EG
1692 /*
1693 * Should not call netif_carrier_on since it will be called if the link
1694 * is up when checking for link state
1695 */
2691d51d
EG
1696}
1697
1698static void bnx2x_update_min_max(struct bnx2x *bp)
1699{
1700 int port = BP_PORT(bp);
1701 int vn, i;
1702
1703 /* Init rate shaping and fairness contexts */
1704 bnx2x_init_port_minmax(bp);
1705
1706 bnx2x_calc_vn_weight_sum(bp);
1707
1708 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1709 bnx2x_init_vn_minmax(bp, 2*vn + port);
1710
1711 if (bp->port.pmf) {
1712 int func;
1713
1714 /* Set the attention towards other drivers on the same port */
1715 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1716 if (vn == BP_E1HVN(bp))
1717 continue;
1718
1719 func = ((vn << 1) | port);
1720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1721 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1722 }
1723
1724 /* Store it to internal memory */
1725 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1726 REG_WR(bp, BAR_XSTRORM_INTMEM +
1727 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1728 ((u32 *)(&bp->cmng))[i]);
1729 }
1730}
1731
1732static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1733{
2691d51d 1734 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
1735
1736 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1737
f34d28ea
EG
1738 /*
1739 * This is the only place besides the function initialization
1740 * where the bp->flags can change so it is done without any
1741 * locks
1742 */
2691d51d
EG
1743 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1744 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 1745 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
1746
1747 bnx2x_e1h_disable(bp);
1748 } else {
1749 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 1750 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
1751
1752 bnx2x_e1h_enable(bp);
1753 }
1754 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1755 }
1756 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1757
1758 bnx2x_update_min_max(bp);
1759 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1760 }
1761
1762 /* Report results to MCP */
1763 if (dcc_event)
a22f0788 1764 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2691d51d 1765 else
a22f0788 1766 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2691d51d
EG
1767}
1768
28912902
MC
1769/* must be called under the spq lock */
1770static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1771{
1772 struct eth_spe *next_spe = bp->spq_prod_bd;
1773
1774 if (bp->spq_prod_bd == bp->spq_last_bd) {
1775 bp->spq_prod_bd = bp->spq;
1776 bp->spq_prod_idx = 0;
1777 DP(NETIF_MSG_TIMER, "end of spq\n");
1778 } else {
1779 bp->spq_prod_bd++;
1780 bp->spq_prod_idx++;
1781 }
1782 return next_spe;
1783}
1784
1785/* must be called under the spq lock */
1786static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1787{
1788 int func = BP_FUNC(bp);
1789
1790 /* Make sure that BD data is updated before writing the producer */
1791 wmb();
1792
1793 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1794 bp->spq_prod_idx);
1795 mmiowb();
1796}
1797
a2fbb9ea 1798/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 1799int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
a2fbb9ea
ET
1800 u32 data_hi, u32 data_lo, int common)
1801{
28912902 1802 struct eth_spe *spe;
a2fbb9ea 1803
a2fbb9ea
ET
1804#ifdef BNX2X_STOP_ON_ERROR
1805 if (unlikely(bp->panic))
1806 return -EIO;
1807#endif
1808
34f80b04 1809 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
1810
1811 if (!bp->spq_left) {
1812 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 1813 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
1814 bnx2x_panic();
1815 return -EBUSY;
1816 }
f1410647 1817
28912902
MC
1818 spe = bnx2x_sp_get_next(bp);
1819
a2fbb9ea 1820 /* CID needs port number to be encoded int it */
28912902 1821 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
1822 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1823 HW_CID(bp, cid));
28912902 1824 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 1825 if (common)
28912902 1826 spe->hdr.type |=
a2fbb9ea
ET
1827 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1828
28912902
MC
1829 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1830 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
1831
1832 bp->spq_left--;
1833
cdaa7cb8
VZ
1834 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1835 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1836 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1837 (u32)(U64_LO(bp->spq_mapping) +
1838 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1839 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1840
28912902 1841 bnx2x_sp_prod_update(bp);
34f80b04 1842 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
1843 return 0;
1844}
1845
1846/* acquire split MCP access lock register */
4a37fb66 1847static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 1848{
72fd0718 1849 u32 j, val;
34f80b04 1850 int rc = 0;
a2fbb9ea
ET
1851
1852 might_sleep();
72fd0718 1853 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
1854 val = (1UL << 31);
1855 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1856 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1857 if (val & (1L << 31))
1858 break;
1859
1860 msleep(5);
1861 }
a2fbb9ea 1862 if (!(val & (1L << 31))) {
19680c48 1863 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
1864 rc = -EBUSY;
1865 }
1866
1867 return rc;
1868}
1869
4a37fb66
YG
1870/* release split MCP access lock register */
1871static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 1872{
72fd0718 1873 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
1874}
1875
1876static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1877{
1878 struct host_def_status_block *def_sb = bp->def_status_blk;
1879 u16 rc = 0;
1880
1881 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
1882 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1883 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1884 rc |= 1;
1885 }
1886 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1887 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1888 rc |= 2;
1889 }
1890 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1891 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1892 rc |= 4;
1893 }
1894 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1895 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1896 rc |= 8;
1897 }
1898 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1899 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1900 rc |= 16;
1901 }
1902 return rc;
1903}
1904
1905/*
1906 * slow path service functions
1907 */
1908
1909static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1910{
34f80b04 1911 int port = BP_PORT(bp);
5c862848
EG
1912 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1913 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
1914 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1915 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
1916 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1917 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 1918 u32 aeu_mask;
87942b46 1919 u32 nig_mask = 0;
a2fbb9ea 1920
a2fbb9ea
ET
1921 if (bp->attn_state & asserted)
1922 BNX2X_ERR("IGU ERROR\n");
1923
3fcaf2e5
EG
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1925 aeu_mask = REG_RD(bp, aeu_addr);
1926
a2fbb9ea 1927 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 1928 aeu_mask, asserted);
72fd0718 1929 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 1930 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 1931
3fcaf2e5
EG
1932 REG_WR(bp, aeu_addr, aeu_mask);
1933 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 1934
3fcaf2e5 1935 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 1936 bp->attn_state |= asserted;
3fcaf2e5 1937 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
1938
1939 if (asserted & ATTN_HARD_WIRED_MASK) {
1940 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 1941
a5e9a7cf
EG
1942 bnx2x_acquire_phy_lock(bp);
1943
877e9aa4 1944 /* save nig interrupt mask */
87942b46 1945 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 1946 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 1947
c18487ee 1948 bnx2x_link_attn(bp);
a2fbb9ea
ET
1949
1950 /* handle unicore attn? */
1951 }
1952 if (asserted & ATTN_SW_TIMER_4_FUNC)
1953 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1954
1955 if (asserted & GPIO_2_FUNC)
1956 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1957
1958 if (asserted & GPIO_3_FUNC)
1959 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1960
1961 if (asserted & GPIO_4_FUNC)
1962 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1963
1964 if (port == 0) {
1965 if (asserted & ATTN_GENERAL_ATTN_1) {
1966 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1967 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1968 }
1969 if (asserted & ATTN_GENERAL_ATTN_2) {
1970 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1971 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1972 }
1973 if (asserted & ATTN_GENERAL_ATTN_3) {
1974 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1975 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1976 }
1977 } else {
1978 if (asserted & ATTN_GENERAL_ATTN_4) {
1979 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1980 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1981 }
1982 if (asserted & ATTN_GENERAL_ATTN_5) {
1983 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1984 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1985 }
1986 if (asserted & ATTN_GENERAL_ATTN_6) {
1987 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1988 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1989 }
1990 }
1991
1992 } /* if hardwired */
1993
5c862848
EG
1994 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1995 asserted, hc_addr);
1996 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
1997
1998 /* now set back the mask */
a5e9a7cf 1999 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2000 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2001 bnx2x_release_phy_lock(bp);
2002 }
a2fbb9ea
ET
2003}
2004
fd4ef40d
EG
2005static inline void bnx2x_fan_failure(struct bnx2x *bp)
2006{
2007 int port = BP_PORT(bp);
b7737c9b 2008 u32 ext_phy_config;
fd4ef40d 2009 /* mark the failure */
b7737c9b
YR
2010 ext_phy_config =
2011 SHMEM_RD(bp,
2012 dev_info.port_hw_config[port].external_phy_config);
2013
2014 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2015 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2016 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2017 ext_phy_config);
fd4ef40d
EG
2018
2019 /* log the failure */
cdaa7cb8
VZ
2020 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2021 " the driver to shutdown the card to prevent permanent"
2022 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2023}
ab6ad5a4 2024
877e9aa4 2025static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2026{
34f80b04 2027 int port = BP_PORT(bp);
877e9aa4 2028 int reg_offset;
d90d96ba 2029 u32 val;
877e9aa4 2030
34f80b04
EG
2031 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2032 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2033
34f80b04 2034 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2035
2036 val = REG_RD(bp, reg_offset);
2037 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2038 REG_WR(bp, reg_offset, val);
2039
2040 BNX2X_ERR("SPIO5 hw attention\n");
2041
fd4ef40d 2042 /* Fan failure attention */
d90d96ba 2043 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2044 bnx2x_fan_failure(bp);
877e9aa4 2045 }
34f80b04 2046
589abe3a
EG
2047 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2048 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2049 bnx2x_acquire_phy_lock(bp);
2050 bnx2x_handle_module_detect_int(&bp->link_params);
2051 bnx2x_release_phy_lock(bp);
2052 }
2053
34f80b04
EG
2054 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2055
2056 val = REG_RD(bp, reg_offset);
2057 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2058 REG_WR(bp, reg_offset, val);
2059
2060 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2061 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2062 bnx2x_panic();
2063 }
877e9aa4
ET
2064}
2065
2066static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2067{
2068 u32 val;
2069
0626b899 2070 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2071
2072 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2073 BNX2X_ERR("DB hw attention 0x%x\n", val);
2074 /* DORQ discard attention */
2075 if (val & 0x2)
2076 BNX2X_ERR("FATAL error from DORQ\n");
2077 }
34f80b04
EG
2078
2079 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2080
2081 int port = BP_PORT(bp);
2082 int reg_offset;
2083
2084 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2085 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2086
2087 val = REG_RD(bp, reg_offset);
2088 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2089 REG_WR(bp, reg_offset, val);
2090
2091 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2092 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2093 bnx2x_panic();
2094 }
877e9aa4
ET
2095}
2096
2097static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2098{
2099 u32 val;
2100
2101 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2102
2103 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2104 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2105 /* CFC error attention */
2106 if (val & 0x2)
2107 BNX2X_ERR("FATAL error from CFC\n");
2108 }
2109
2110 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2111
2112 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2113 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2114 /* RQ_USDMDP_FIFO_OVERFLOW */
2115 if (val & 0x18000)
2116 BNX2X_ERR("FATAL error from PXP\n");
2117 }
34f80b04
EG
2118
2119 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2120
2121 int port = BP_PORT(bp);
2122 int reg_offset;
2123
2124 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2125 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2126
2127 val = REG_RD(bp, reg_offset);
2128 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2129 REG_WR(bp, reg_offset, val);
2130
2131 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 2132 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
2133 bnx2x_panic();
2134 }
877e9aa4
ET
2135}
2136
2137static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2138{
34f80b04
EG
2139 u32 val;
2140
877e9aa4
ET
2141 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2142
34f80b04
EG
2143 if (attn & BNX2X_PMF_LINK_ASSERT) {
2144 int func = BP_FUNC(bp);
2145
2146 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
2147 bp->mf_config = SHMEM_RD(bp,
2148 mf_cfg.func_mf_config[func].config);
2691d51d
EG
2149 val = SHMEM_RD(bp, func_mb[func].drv_status);
2150 if (val & DRV_STATUS_DCC_EVENT_MASK)
2151 bnx2x_dcc_event(bp,
2152 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 2153 bnx2x__link_status_update(bp);
2691d51d 2154 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
2155 bnx2x_pmf_update(bp);
2156
2157 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2158
2159 BNX2X_ERR("MC assert!\n");
2160 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2161 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2162 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2163 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2164 bnx2x_panic();
2165
2166 } else if (attn & BNX2X_MCP_ASSERT) {
2167
2168 BNX2X_ERR("MCP assert!\n");
2169 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2170 bnx2x_fw_dump(bp);
877e9aa4
ET
2171
2172 } else
2173 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2174 }
2175
2176 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2177 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2178 if (attn & BNX2X_GRC_TIMEOUT) {
2179 val = CHIP_IS_E1H(bp) ?
2180 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2181 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2182 }
2183 if (attn & BNX2X_GRC_RSV) {
2184 val = CHIP_IS_E1H(bp) ?
2185 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2186 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2187 }
877e9aa4 2188 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2189 }
2190}
2191
72fd0718
VZ
2192#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2193#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2194#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2195#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2196#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2197#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2198/*
2199 * should be run under rtnl lock
2200 */
2201static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2202{
2203 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2204 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2205 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2206 barrier();
2207 mmiowb();
2208}
2209
2210/*
2211 * should be run under rtnl lock
2212 */
2213static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2214{
2215 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2216 val |= (1 << 16);
2217 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2218 barrier();
2219 mmiowb();
2220}
2221
2222/*
2223 * should be run under rtnl lock
2224 */
9f6c9258 2225bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
2226{
2227 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2228 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2229 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2230}
2231
2232/*
2233 * should be run under rtnl lock
2234 */
9f6c9258 2235inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2236{
2237 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2238
2239 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2240
2241 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2242 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2243 barrier();
2244 mmiowb();
2245}
2246
2247/*
2248 * should be run under rtnl lock
2249 */
9f6c9258 2250u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2251{
2252 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2253
2254 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2255
2256 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2257 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2258 barrier();
2259 mmiowb();
2260
2261 return val1;
2262}
2263
2264/*
2265 * should be run under rtnl lock
2266 */
2267static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2268{
2269 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2270}
2271
2272static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2273{
2274 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2275 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2276}
2277
2278static inline void _print_next_block(int idx, const char *blk)
2279{
2280 if (idx)
2281 pr_cont(", ");
2282 pr_cont("%s", blk);
2283}
2284
2285static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2286{
2287 int i = 0;
2288 u32 cur_bit = 0;
2289 for (i = 0; sig; i++) {
2290 cur_bit = ((u32)0x1 << i);
2291 if (sig & cur_bit) {
2292 switch (cur_bit) {
2293 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2294 _print_next_block(par_num++, "BRB");
2295 break;
2296 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2297 _print_next_block(par_num++, "PARSER");
2298 break;
2299 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2300 _print_next_block(par_num++, "TSDM");
2301 break;
2302 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2303 _print_next_block(par_num++, "SEARCHER");
2304 break;
2305 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2306 _print_next_block(par_num++, "TSEMI");
2307 break;
2308 }
2309
2310 /* Clear the bit */
2311 sig &= ~cur_bit;
2312 }
2313 }
2314
2315 return par_num;
2316}
2317
2318static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2319{
2320 int i = 0;
2321 u32 cur_bit = 0;
2322 for (i = 0; sig; i++) {
2323 cur_bit = ((u32)0x1 << i);
2324 if (sig & cur_bit) {
2325 switch (cur_bit) {
2326 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2327 _print_next_block(par_num++, "PBCLIENT");
2328 break;
2329 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2330 _print_next_block(par_num++, "QM");
2331 break;
2332 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2333 _print_next_block(par_num++, "XSDM");
2334 break;
2335 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2336 _print_next_block(par_num++, "XSEMI");
2337 break;
2338 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2339 _print_next_block(par_num++, "DOORBELLQ");
2340 break;
2341 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2342 _print_next_block(par_num++, "VAUX PCI CORE");
2343 break;
2344 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2345 _print_next_block(par_num++, "DEBUG");
2346 break;
2347 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2348 _print_next_block(par_num++, "USDM");
2349 break;
2350 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2351 _print_next_block(par_num++, "USEMI");
2352 break;
2353 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2354 _print_next_block(par_num++, "UPB");
2355 break;
2356 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2357 _print_next_block(par_num++, "CSDM");
2358 break;
2359 }
2360
2361 /* Clear the bit */
2362 sig &= ~cur_bit;
2363 }
2364 }
2365
2366 return par_num;
2367}
2368
2369static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2370{
2371 int i = 0;
2372 u32 cur_bit = 0;
2373 for (i = 0; sig; i++) {
2374 cur_bit = ((u32)0x1 << i);
2375 if (sig & cur_bit) {
2376 switch (cur_bit) {
2377 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2378 _print_next_block(par_num++, "CSEMI");
2379 break;
2380 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2381 _print_next_block(par_num++, "PXP");
2382 break;
2383 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2384 _print_next_block(par_num++,
2385 "PXPPCICLOCKCLIENT");
2386 break;
2387 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2388 _print_next_block(par_num++, "CFC");
2389 break;
2390 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2391 _print_next_block(par_num++, "CDU");
2392 break;
2393 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2394 _print_next_block(par_num++, "IGU");
2395 break;
2396 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2397 _print_next_block(par_num++, "MISC");
2398 break;
2399 }
2400
2401 /* Clear the bit */
2402 sig &= ~cur_bit;
2403 }
2404 }
2405
2406 return par_num;
2407}
2408
2409static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2410{
2411 int i = 0;
2412 u32 cur_bit = 0;
2413 for (i = 0; sig; i++) {
2414 cur_bit = ((u32)0x1 << i);
2415 if (sig & cur_bit) {
2416 switch (cur_bit) {
2417 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2418 _print_next_block(par_num++, "MCP ROM");
2419 break;
2420 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2421 _print_next_block(par_num++, "MCP UMP RX");
2422 break;
2423 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2424 _print_next_block(par_num++, "MCP UMP TX");
2425 break;
2426 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2427 _print_next_block(par_num++, "MCP SCPAD");
2428 break;
2429 }
2430
2431 /* Clear the bit */
2432 sig &= ~cur_bit;
2433 }
2434 }
2435
2436 return par_num;
2437}
2438
2439static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2440 u32 sig2, u32 sig3)
2441{
2442 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2443 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2444 int par_num = 0;
2445 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2446 "[0]:0x%08x [1]:0x%08x "
2447 "[2]:0x%08x [3]:0x%08x\n",
2448 sig0 & HW_PRTY_ASSERT_SET_0,
2449 sig1 & HW_PRTY_ASSERT_SET_1,
2450 sig2 & HW_PRTY_ASSERT_SET_2,
2451 sig3 & HW_PRTY_ASSERT_SET_3);
2452 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2453 bp->dev->name);
2454 par_num = bnx2x_print_blocks_with_parity0(
2455 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2456 par_num = bnx2x_print_blocks_with_parity1(
2457 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2458 par_num = bnx2x_print_blocks_with_parity2(
2459 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2460 par_num = bnx2x_print_blocks_with_parity3(
2461 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2462 printk("\n");
2463 return true;
2464 } else
2465 return false;
2466}
2467
9f6c9258 2468bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 2469{
a2fbb9ea 2470 struct attn_route attn;
72fd0718
VZ
2471 int port = BP_PORT(bp);
2472
2473 attn.sig[0] = REG_RD(bp,
2474 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2475 port*4);
2476 attn.sig[1] = REG_RD(bp,
2477 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2478 port*4);
2479 attn.sig[2] = REG_RD(bp,
2480 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2481 port*4);
2482 attn.sig[3] = REG_RD(bp,
2483 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2484 port*4);
2485
2486 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2487 attn.sig[3]);
2488}
2489
2490static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2491{
2492 struct attn_route attn, *group_mask;
34f80b04 2493 int port = BP_PORT(bp);
877e9aa4 2494 int index;
a2fbb9ea
ET
2495 u32 reg_addr;
2496 u32 val;
3fcaf2e5 2497 u32 aeu_mask;
a2fbb9ea
ET
2498
2499 /* need to take HW lock because MCP or other port might also
2500 try to handle this event */
4a37fb66 2501 bnx2x_acquire_alr(bp);
a2fbb9ea 2502
72fd0718
VZ
2503 if (bnx2x_chk_parity_attn(bp)) {
2504 bp->recovery_state = BNX2X_RECOVERY_INIT;
2505 bnx2x_set_reset_in_progress(bp);
2506 schedule_delayed_work(&bp->reset_task, 0);
2507 /* Disable HW interrupts */
2508 bnx2x_int_disable(bp);
2509 bnx2x_release_alr(bp);
2510 /* In case of parity errors don't handle attentions so that
2511 * other function would "see" parity errors.
2512 */
2513 return;
2514 }
2515
a2fbb9ea
ET
2516 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2517 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2518 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2519 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2520 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2521 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2522
2523 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2524 if (deasserted & (1 << index)) {
72fd0718 2525 group_mask = &bp->attn_group[index];
a2fbb9ea 2526
34f80b04 2527 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
72fd0718
VZ
2528 index, group_mask->sig[0], group_mask->sig[1],
2529 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 2530
877e9aa4 2531 bnx2x_attn_int_deasserted3(bp,
72fd0718 2532 attn.sig[3] & group_mask->sig[3]);
877e9aa4 2533 bnx2x_attn_int_deasserted1(bp,
72fd0718 2534 attn.sig[1] & group_mask->sig[1]);
877e9aa4 2535 bnx2x_attn_int_deasserted2(bp,
72fd0718 2536 attn.sig[2] & group_mask->sig[2]);
877e9aa4 2537 bnx2x_attn_int_deasserted0(bp,
72fd0718 2538 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
2539 }
2540 }
2541
4a37fb66 2542 bnx2x_release_alr(bp);
a2fbb9ea 2543
5c862848 2544 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2545
2546 val = ~deasserted;
3fcaf2e5
EG
2547 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2548 val, reg_addr);
5c862848 2549 REG_WR(bp, reg_addr, val);
a2fbb9ea 2550
a2fbb9ea 2551 if (~bp->attn_state & deasserted)
3fcaf2e5 2552 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2553
2554 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2555 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2556
3fcaf2e5
EG
2557 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2558 aeu_mask = REG_RD(bp, reg_addr);
2559
2560 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2561 aeu_mask, deasserted);
72fd0718 2562 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 2563 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2564
3fcaf2e5
EG
2565 REG_WR(bp, reg_addr, aeu_mask);
2566 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2567
2568 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2569 bp->attn_state &= ~deasserted;
2570 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2571}
2572
2573static void bnx2x_attn_int(struct bnx2x *bp)
2574{
2575 /* read local copy of bits */
68d59484
EG
2576 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2577 attn_bits);
2578 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2579 attn_bits_ack);
a2fbb9ea
ET
2580 u32 attn_state = bp->attn_state;
2581
2582 /* look for changed bits */
2583 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2584 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2585
2586 DP(NETIF_MSG_HW,
2587 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2588 attn_bits, attn_ack, asserted, deasserted);
2589
2590 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2591 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2592
2593 /* handle bits that were raised */
2594 if (asserted)
2595 bnx2x_attn_int_asserted(bp, asserted);
2596
2597 if (deasserted)
2598 bnx2x_attn_int_deasserted(bp, deasserted);
2599}
2600
2601static void bnx2x_sp_task(struct work_struct *work)
2602{
1cf167f2 2603 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2604 u16 status;
2605
2606 /* Return here if interrupt is disabled */
2607 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2608 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2609 return;
2610 }
2611
2612 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2613/* if (status == 0) */
2614/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2615
cdaa7cb8 2616 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 2617
877e9aa4 2618 /* HW attentions */
cdaa7cb8 2619 if (status & 0x1) {
a2fbb9ea 2620 bnx2x_attn_int(bp);
cdaa7cb8
VZ
2621 status &= ~0x1;
2622 }
2623
2624 /* CStorm events: STAT_QUERY */
2625 if (status & 0x2) {
2626 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2627 status &= ~0x2;
2628 }
2629
2630 if (unlikely(status))
2631 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2632 status);
a2fbb9ea 2633
68d59484 2634 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2635 IGU_INT_NOP, 1);
2636 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2637 IGU_INT_NOP, 1);
2638 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2639 IGU_INT_NOP, 1);
2640 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2641 IGU_INT_NOP, 1);
2642 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2643 IGU_INT_ENABLE, 1);
2644}
2645
9f6c9258 2646irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
2647{
2648 struct net_device *dev = dev_instance;
2649 struct bnx2x *bp = netdev_priv(dev);
2650
2651 /* Return here if interrupt is disabled */
2652 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2653 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2654 return IRQ_HANDLED;
2655 }
2656
8d9c5f34 2657 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2658
2659#ifdef BNX2X_STOP_ON_ERROR
2660 if (unlikely(bp->panic))
2661 return IRQ_HANDLED;
2662#endif
2663
993ac7b5
MC
2664#ifdef BCM_CNIC
2665 {
2666 struct cnic_ops *c_ops;
2667
2668 rcu_read_lock();
2669 c_ops = rcu_dereference(bp->cnic_ops);
2670 if (c_ops)
2671 c_ops->cnic_handler(bp->cnic_data, NULL);
2672 rcu_read_unlock();
2673 }
2674#endif
1cf167f2 2675 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2676
2677 return IRQ_HANDLED;
2678}
2679
2680/* end of slow path */
2681
a2fbb9ea
ET
2682static void bnx2x_timer(unsigned long data)
2683{
2684 struct bnx2x *bp = (struct bnx2x *) data;
2685
2686 if (!netif_running(bp->dev))
2687 return;
2688
2689 if (atomic_read(&bp->intr_sem) != 0)
f1410647 2690 goto timer_restart;
a2fbb9ea
ET
2691
2692 if (poll) {
2693 struct bnx2x_fastpath *fp = &bp->fp[0];
2694 int rc;
2695
7961f791 2696 bnx2x_tx_int(fp);
a2fbb9ea
ET
2697 rc = bnx2x_rx_int(fp, 1000);
2698 }
2699
34f80b04
EG
2700 if (!BP_NOMCP(bp)) {
2701 int func = BP_FUNC(bp);
a2fbb9ea
ET
2702 u32 drv_pulse;
2703 u32 mcp_pulse;
2704
2705 ++bp->fw_drv_pulse_wr_seq;
2706 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2707 /* TBD - add SYSTEM_TIME */
2708 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 2709 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 2710
34f80b04 2711 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
2712 MCP_PULSE_SEQ_MASK);
2713 /* The delta between driver pulse and mcp response
2714 * should be 1 (before mcp response) or 0 (after mcp response)
2715 */
2716 if ((drv_pulse != mcp_pulse) &&
2717 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2718 /* someone lost a heartbeat... */
2719 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2720 drv_pulse, mcp_pulse);
2721 }
2722 }
2723
f34d28ea 2724 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 2725 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 2726
f1410647 2727timer_restart:
a2fbb9ea
ET
2728 mod_timer(&bp->timer, jiffies + bp->current_interval);
2729}
2730
2731/* end of Statistics */
2732
2733/* nic init */
2734
2735/*
2736 * nic init service functions
2737 */
2738
34f80b04 2739static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 2740{
34f80b04
EG
2741 int port = BP_PORT(bp);
2742
ca00392c
EG
2743 /* "CSTORM" */
2744 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2746 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2747 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2748 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2749 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
2750}
2751
9f6c9258 2752void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5c862848 2753 dma_addr_t mapping, int sb_id)
34f80b04
EG
2754{
2755 int port = BP_PORT(bp);
bb2a0f7a 2756 int func = BP_FUNC(bp);
a2fbb9ea 2757 int index;
34f80b04 2758 u64 section;
a2fbb9ea
ET
2759
2760 /* USTORM */
2761 section = ((u64)mapping) + offsetof(struct host_status_block,
2762 u_status_block);
34f80b04 2763 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 2764
ca00392c
EG
2765 REG_WR(bp, BAR_CSTRORM_INTMEM +
2766 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2767 REG_WR(bp, BAR_CSTRORM_INTMEM +
2768 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 2769 U64_HI(section));
ca00392c
EG
2770 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2771 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
2772
2773 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
2774 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2775 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
2776
2777 /* CSTORM */
2778 section = ((u64)mapping) + offsetof(struct host_status_block,
2779 c_status_block);
34f80b04 2780 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2781
2782 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2783 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 2784 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2785 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 2786 U64_HI(section));
7a9b2557 2787 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 2788 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
2789
2790 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2791 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 2792 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
2793
2794 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2795}
2796
2797static void bnx2x_zero_def_sb(struct bnx2x *bp)
2798{
2799 int func = BP_FUNC(bp);
a2fbb9ea 2800
ca00392c 2801 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
2802 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2803 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
2804 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2805 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2806 sizeof(struct cstorm_def_status_block_u)/4);
2807 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2808 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2809 sizeof(struct cstorm_def_status_block_c)/4);
2810 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
2811 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2812 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
2813}
2814
2815static void bnx2x_init_def_sb(struct bnx2x *bp,
2816 struct host_def_status_block *def_sb,
34f80b04 2817 dma_addr_t mapping, int sb_id)
a2fbb9ea 2818{
34f80b04
EG
2819 int port = BP_PORT(bp);
2820 int func = BP_FUNC(bp);
a2fbb9ea
ET
2821 int index, val, reg_offset;
2822 u64 section;
2823
2824 /* ATTN */
2825 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2826 atten_status_block);
34f80b04 2827 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 2828
49d66772
ET
2829 bp->attn_state = 0;
2830
a2fbb9ea
ET
2831 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2832 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2833
34f80b04 2834 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
2835 bp->attn_group[index].sig[0] = REG_RD(bp,
2836 reg_offset + 0x10*index);
2837 bp->attn_group[index].sig[1] = REG_RD(bp,
2838 reg_offset + 0x4 + 0x10*index);
2839 bp->attn_group[index].sig[2] = REG_RD(bp,
2840 reg_offset + 0x8 + 0x10*index);
2841 bp->attn_group[index].sig[3] = REG_RD(bp,
2842 reg_offset + 0xc + 0x10*index);
2843 }
2844
a2fbb9ea
ET
2845 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2846 HC_REG_ATTN_MSG0_ADDR_L);
2847
2848 REG_WR(bp, reg_offset, U64_LO(section));
2849 REG_WR(bp, reg_offset + 4, U64_HI(section));
2850
2851 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2852
2853 val = REG_RD(bp, reg_offset);
34f80b04 2854 val |= sb_id;
a2fbb9ea
ET
2855 REG_WR(bp, reg_offset, val);
2856
2857 /* USTORM */
2858 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2859 u_def_status_block);
34f80b04 2860 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 2861
ca00392c
EG
2862 REG_WR(bp, BAR_CSTRORM_INTMEM +
2863 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2864 REG_WR(bp, BAR_CSTRORM_INTMEM +
2865 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 2866 U64_HI(section));
ca00392c
EG
2867 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2868 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
2869
2870 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
2871 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2872 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
2873
2874 /* CSTORM */
2875 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2876 c_def_status_block);
34f80b04 2877 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2878
2879 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2880 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 2881 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2882 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 2883 U64_HI(section));
5c862848 2884 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 2885 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
2886
2887 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2888 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 2889 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
2890
2891 /* TSTORM */
2892 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2893 t_def_status_block);
34f80b04 2894 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2895
2896 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 2897 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2898 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 2899 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2900 U64_HI(section));
5c862848 2901 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 2902 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
2903
2904 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2905 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 2906 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
2907
2908 /* XSTORM */
2909 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2910 x_def_status_block);
34f80b04 2911 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2912
2913 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 2914 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2915 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 2916 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2917 U64_HI(section));
5c862848 2918 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 2919 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
2920
2921 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2922 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 2923 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 2924
bb2a0f7a 2925 bp->stats_pending = 0;
66e855f3 2926 bp->set_mac_pending = 0;
bb2a0f7a 2927
34f80b04 2928 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
2929}
2930
9f6c9258 2931void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 2932{
34f80b04 2933 int port = BP_PORT(bp);
a2fbb9ea
ET
2934 int i;
2935
2936 for_each_queue(bp, i) {
34f80b04 2937 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
2938
2939 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
2940 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2941 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2942 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 2943 bp->rx_ticks/(4 * BNX2X_BTR));
ca00392c
EG
2944 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2945 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2946 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 2947 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
2948
2949 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2950 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
2951 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2952 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 2953 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 2954 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
2955 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2956 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 2957 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
2958 }
2959}
2960
a2fbb9ea
ET
2961static void bnx2x_init_sp_ring(struct bnx2x *bp)
2962{
34f80b04 2963 int func = BP_FUNC(bp);
a2fbb9ea
ET
2964
2965 spin_lock_init(&bp->spq_lock);
2966
2967 bp->spq_left = MAX_SPQ_PENDING;
2968 bp->spq_prod_idx = 0;
a2fbb9ea
ET
2969 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2970 bp->spq_prod_bd = bp->spq;
2971 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2972
34f80b04 2973 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 2974 U64_LO(bp->spq_mapping));
34f80b04
EG
2975 REG_WR(bp,
2976 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
2977 U64_HI(bp->spq_mapping));
2978
34f80b04 2979 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2980 bp->spq_prod_idx);
2981}
2982
2983static void bnx2x_init_context(struct bnx2x *bp)
2984{
2985 int i;
2986
54b9ddaa
VZ
2987 /* Rx */
2988 for_each_queue(bp, i) {
a2fbb9ea
ET
2989 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2990 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 2991 u8 cl_id = fp->cl_id;
a2fbb9ea 2992
34f80b04
EG
2993 context->ustorm_st_context.common.sb_index_numbers =
2994 BNX2X_RX_SB_INDEX_NUM;
0626b899 2995 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 2996 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 2997 context->ustorm_st_context.common.flags =
de832a55
EG
2998 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2999 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
3000 context->ustorm_st_context.common.statistics_counter_id =
3001 cl_id;
8d9c5f34 3002 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 3003 BNX2X_RX_ALIGN_SHIFT;
34f80b04 3004 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 3005 bp->rx_buf_size;
34f80b04 3006 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 3007 U64_HI(fp->rx_desc_mapping);
34f80b04 3008 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 3009 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
3010 if (!fp->disable_tpa) {
3011 context->ustorm_st_context.common.flags |=
ca00392c 3012 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 3013 context->ustorm_st_context.common.sge_buff_size =
cdaa7cb8
VZ
3014 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
3015 0xffff);
7a9b2557
VZ
3016 context->ustorm_st_context.common.sge_page_base_hi =
3017 U64_HI(fp->rx_sge_mapping);
3018 context->ustorm_st_context.common.sge_page_base_lo =
3019 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
3020
3021 context->ustorm_st_context.common.max_sges_for_packet =
3022 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
3023 context->ustorm_st_context.common.max_sges_for_packet =
3024 ((context->ustorm_st_context.common.
3025 max_sges_for_packet + PAGES_PER_SGE - 1) &
3026 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
3027 }
3028
8d9c5f34
EG
3029 context->ustorm_ag_context.cdu_usage =
3030 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3031 CDU_REGION_NUMBER_UCM_AG,
3032 ETH_CONNECTION_TYPE);
3033
ca00392c
EG
3034 context->xstorm_ag_context.cdu_reserved =
3035 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3036 CDU_REGION_NUMBER_XCM_AG,
3037 ETH_CONNECTION_TYPE);
3038 }
3039
54b9ddaa
VZ
3040 /* Tx */
3041 for_each_queue(bp, i) {
ca00392c
EG
3042 struct bnx2x_fastpath *fp = &bp->fp[i];
3043 struct eth_context *context =
54b9ddaa 3044 bnx2x_sp(bp, context[i].eth);
ca00392c
EG
3045
3046 context->cstorm_st_context.sb_index_number =
3047 C_SB_ETH_TX_CQ_INDEX;
3048 context->cstorm_st_context.status_block_id = fp->sb_id;
3049
8d9c5f34
EG
3050 context->xstorm_st_context.tx_bd_page_base_hi =
3051 U64_HI(fp->tx_desc_mapping);
3052 context->xstorm_st_context.tx_bd_page_base_lo =
3053 U64_LO(fp->tx_desc_mapping);
ca00392c 3054 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 3055 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
3056 }
3057}
3058
3059static void bnx2x_init_ind_table(struct bnx2x *bp)
3060{
26c8fa4d 3061 int func = BP_FUNC(bp);
a2fbb9ea
ET
3062 int i;
3063
555f6c78 3064 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
3065 return;
3066
555f6c78
EG
3067 DP(NETIF_MSG_IFUP,
3068 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 3069 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 3070 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 3071 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 3072 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
3073}
3074
9f6c9258 3075void bnx2x_set_client_config(struct bnx2x *bp)
49d66772 3076{
49d66772 3077 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
3078 int port = BP_PORT(bp);
3079 int i;
49d66772 3080
e7799c5f 3081 tstorm_client.mtu = bp->dev->mtu;
49d66772 3082 tstorm_client.config_flags =
de832a55
EG
3083 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3084 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 3085#ifdef BCM_VLAN
0c6671b0 3086 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 3087 tstorm_client.config_flags |=
8d9c5f34 3088 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
3089 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3090 }
3091#endif
49d66772
ET
3092
3093 for_each_queue(bp, i) {
de832a55
EG
3094 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3095
49d66772 3096 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3097 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
3098 ((u32 *)&tstorm_client)[0]);
3099 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3100 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
3101 ((u32 *)&tstorm_client)[1]);
3102 }
3103
34f80b04
EG
3104 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3105 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
3106}
3107
9f6c9258 3108void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 3109{
a2fbb9ea 3110 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 3111 int mode = bp->rx_mode;
37b091ba 3112 int mask = bp->rx_mode_cl_mask;
34f80b04 3113 int func = BP_FUNC(bp);
581ce43d 3114 int port = BP_PORT(bp);
a2fbb9ea 3115 int i;
581ce43d
EG
3116 /* All but management unicast packets should pass to the host as well */
3117 u32 llh_mask =
3118 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3119 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3120 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3121 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 3122
3196a88a 3123 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
3124
3125 switch (mode) {
3126 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
3127 tstorm_mac_filter.ucast_drop_all = mask;
3128 tstorm_mac_filter.mcast_drop_all = mask;
3129 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 3130 break;
356e2385 3131
a2fbb9ea 3132 case BNX2X_RX_MODE_NORMAL:
34f80b04 3133 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 3134 break;
356e2385 3135
a2fbb9ea 3136 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
3137 tstorm_mac_filter.mcast_accept_all = mask;
3138 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 3139 break;
356e2385 3140
a2fbb9ea 3141 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
3142 tstorm_mac_filter.ucast_accept_all = mask;
3143 tstorm_mac_filter.mcast_accept_all = mask;
3144 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
3145 /* pass management unicast packets as well */
3146 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 3147 break;
356e2385 3148
a2fbb9ea 3149 default:
34f80b04
EG
3150 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3151 break;
a2fbb9ea
ET
3152 }
3153
581ce43d
EG
3154 REG_WR(bp,
3155 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3156 llh_mask);
3157
a2fbb9ea
ET
3158 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3159 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3160 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
3161 ((u32 *)&tstorm_mac_filter)[i]);
3162
34f80b04 3163/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
3164 ((u32 *)&tstorm_mac_filter)[i]); */
3165 }
a2fbb9ea 3166
49d66772
ET
3167 if (mode != BNX2X_RX_MODE_NONE)
3168 bnx2x_set_client_config(bp);
a2fbb9ea
ET
3169}
3170
471de716
EG
3171static void bnx2x_init_internal_common(struct bnx2x *bp)
3172{
3173 int i;
3174
3175 /* Zero this manually as its initialization is
3176 currently missing in the initTool */
3177 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3178 REG_WR(bp, BAR_USTRORM_INTMEM +
3179 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3180}
3181
3182static void bnx2x_init_internal_port(struct bnx2x *bp)
3183{
3184 int port = BP_PORT(bp);
3185
ca00392c
EG
3186 REG_WR(bp,
3187 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3188 REG_WR(bp,
3189 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
3190 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3191 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3192}
3193
3194static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 3195{
a2fbb9ea
ET
3196 struct tstorm_eth_function_common_config tstorm_config = {0};
3197 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
3198 int port = BP_PORT(bp);
3199 int func = BP_FUNC(bp);
de832a55
EG
3200 int i, j;
3201 u32 offset;
471de716 3202 u16 max_agg_size;
a2fbb9ea 3203
c68ed255
TH
3204 tstorm_config.config_flags = RSS_FLAGS(bp);
3205
3206 if (is_multi(bp))
a2fbb9ea 3207 tstorm_config.rss_result_mask = MULTI_MASK;
ca00392c
EG
3208
3209 /* Enable TPA if needed */
3210 if (bp->flags & TPA_ENABLE_FLAG)
3211 tstorm_config.config_flags |=
3212 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3213
8d9c5f34
EG
3214 if (IS_E1HMF(bp))
3215 tstorm_config.config_flags |=
3216 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 3217
34f80b04
EG
3218 tstorm_config.leading_client_id = BP_L_ID(bp);
3219
a2fbb9ea 3220 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3221 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
3222 (*(u32 *)&tstorm_config));
3223
c14423fe 3224 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 3225 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
3226 bnx2x_set_storm_rx_mode(bp);
3227
de832a55
EG
3228 for_each_queue(bp, i) {
3229 u8 cl_id = bp->fp[i].cl_id;
3230
3231 /* reset xstorm per client statistics */
3232 offset = BAR_XSTRORM_INTMEM +
3233 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3234 for (j = 0;
3235 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3236 REG_WR(bp, offset + j*4, 0);
3237
3238 /* reset tstorm per client statistics */
3239 offset = BAR_TSTRORM_INTMEM +
3240 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3241 for (j = 0;
3242 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3243 REG_WR(bp, offset + j*4, 0);
3244
3245 /* reset ustorm per client statistics */
3246 offset = BAR_USTRORM_INTMEM +
3247 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3248 for (j = 0;
3249 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3250 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
3251 }
3252
3253 /* Init statistics related context */
34f80b04 3254 stats_flags.collect_eth = 1;
a2fbb9ea 3255
66e855f3 3256 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3257 ((u32 *)&stats_flags)[0]);
66e855f3 3258 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
3259 ((u32 *)&stats_flags)[1]);
3260
66e855f3 3261 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3262 ((u32 *)&stats_flags)[0]);
66e855f3 3263 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
3264 ((u32 *)&stats_flags)[1]);
3265
de832a55
EG
3266 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3267 ((u32 *)&stats_flags)[0]);
3268 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3269 ((u32 *)&stats_flags)[1]);
3270
66e855f3 3271 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3272 ((u32 *)&stats_flags)[0]);
66e855f3 3273 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
3274 ((u32 *)&stats_flags)[1]);
3275
66e855f3
YG
3276 REG_WR(bp, BAR_XSTRORM_INTMEM +
3277 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3278 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3279 REG_WR(bp, BAR_XSTRORM_INTMEM +
3280 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3281 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3282
3283 REG_WR(bp, BAR_TSTRORM_INTMEM +
3284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3285 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3286 REG_WR(bp, BAR_TSTRORM_INTMEM +
3287 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3288 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 3289
de832a55
EG
3290 REG_WR(bp, BAR_USTRORM_INTMEM +
3291 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3292 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3293 REG_WR(bp, BAR_USTRORM_INTMEM +
3294 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3295 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3296
34f80b04
EG
3297 if (CHIP_IS_E1H(bp)) {
3298 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3299 IS_E1HMF(bp));
3300 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3301 IS_E1HMF(bp));
3302 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3303 IS_E1HMF(bp));
3304 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3305 IS_E1HMF(bp));
3306
7a9b2557
VZ
3307 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3308 bp->e1hov);
34f80b04
EG
3309 }
3310
4f40f2cb 3311 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
cdaa7cb8
VZ
3312 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3313 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
54b9ddaa 3314 for_each_queue(bp, i) {
7a9b2557 3315 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
3316
3317 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 3318 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
3319 U64_LO(fp->rx_comp_mapping));
3320 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 3321 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
3322 U64_HI(fp->rx_comp_mapping));
3323
ca00392c
EG
3324 /* Next page */
3325 REG_WR(bp, BAR_USTRORM_INTMEM +
3326 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3327 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3328 REG_WR(bp, BAR_USTRORM_INTMEM +
3329 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3330 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3331
7a9b2557 3332 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 3333 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
3334 max_agg_size);
3335 }
8a1c38d1 3336
1c06328c
EG
3337 /* dropless flow control */
3338 if (CHIP_IS_E1H(bp)) {
3339 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3340
3341 rx_pause.bd_thr_low = 250;
3342 rx_pause.cqe_thr_low = 250;
3343 rx_pause.cos = 1;
3344 rx_pause.sge_thr_low = 0;
3345 rx_pause.bd_thr_high = 350;
3346 rx_pause.cqe_thr_high = 350;
3347 rx_pause.sge_thr_high = 0;
3348
54b9ddaa 3349 for_each_queue(bp, i) {
1c06328c
EG
3350 struct bnx2x_fastpath *fp = &bp->fp[i];
3351
3352 if (!fp->disable_tpa) {
3353 rx_pause.sge_thr_low = 150;
3354 rx_pause.sge_thr_high = 250;
3355 }
3356
3357
3358 offset = BAR_USTRORM_INTMEM +
3359 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3360 fp->cl_id);
3361 for (j = 0;
3362 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3363 j++)
3364 REG_WR(bp, offset + j*4,
3365 ((u32 *)&rx_pause)[j]);
3366 }
3367 }
3368
8a1c38d1
EG
3369 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3370
3371 /* Init rate shaping and fairness contexts */
3372 if (IS_E1HMF(bp)) {
3373 int vn;
3374
3375 /* During init there is no active link
3376 Until link is up, set link rate to 10Gbps */
3377 bp->link_vars.line_speed = SPEED_10000;
3378 bnx2x_init_port_minmax(bp);
3379
b015e3d1
EG
3380 if (!BP_NOMCP(bp))
3381 bp->mf_config =
3382 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
3383 bnx2x_calc_vn_weight_sum(bp);
3384
3385 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3386 bnx2x_init_vn_minmax(bp, 2*vn + port);
3387
3388 /* Enable rate shaping and fairness */
b015e3d1 3389 bp->cmng.flags.cmng_enables |=
8a1c38d1 3390 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 3391
8a1c38d1
EG
3392 } else {
3393 /* rate shaping and fairness are disabled */
3394 DP(NETIF_MSG_IFUP,
3395 "single function mode minmax will be disabled\n");
3396 }
3397
3398
cdaa7cb8 3399 /* Store cmng structures to internal memory */
8a1c38d1
EG
3400 if (bp->port.pmf)
3401 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3402 REG_WR(bp, BAR_XSTRORM_INTMEM +
3403 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3404 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
3405}
3406
471de716
EG
3407static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3408{
3409 switch (load_code) {
3410 case FW_MSG_CODE_DRV_LOAD_COMMON:
3411 bnx2x_init_internal_common(bp);
3412 /* no break */
3413
3414 case FW_MSG_CODE_DRV_LOAD_PORT:
3415 bnx2x_init_internal_port(bp);
3416 /* no break */
3417
3418 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3419 bnx2x_init_internal_func(bp);
3420 break;
3421
3422 default:
3423 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3424 break;
3425 }
3426}
3427
9f6c9258 3428void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
3429{
3430 int i;
3431
3432 for_each_queue(bp, i) {
3433 struct bnx2x_fastpath *fp = &bp->fp[i];
3434
34f80b04 3435 fp->bp = bp;
a2fbb9ea 3436 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 3437 fp->index = i;
34f80b04 3438 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
3439#ifdef BCM_CNIC
3440 fp->sb_id = fp->cl_id + 1;
3441#else
34f80b04 3442 fp->sb_id = fp->cl_id;
37b091ba 3443#endif
34f80b04 3444 DP(NETIF_MSG_IFUP,
f5372251
EG
3445 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3446 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 3447 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 3448 fp->sb_id);
5c862848 3449 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
3450 }
3451
16119785
EG
3452 /* ensure status block indices were read */
3453 rmb();
3454
3455
5c862848
EG
3456 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3457 DEF_SB_ID);
3458 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
3459 bnx2x_update_coalesce(bp);
3460 bnx2x_init_rx_rings(bp);
3461 bnx2x_init_tx_ring(bp);
3462 bnx2x_init_sp_ring(bp);
3463 bnx2x_init_context(bp);
471de716 3464 bnx2x_init_internal(bp, load_code);
a2fbb9ea 3465 bnx2x_init_ind_table(bp);
0ef00459
EG
3466 bnx2x_stats_init(bp);
3467
3468 /* At this point, we are ready for interrupts */
3469 atomic_set(&bp->intr_sem, 0);
3470
3471 /* flush all before enabling interrupts */
3472 mb();
3473 mmiowb();
3474
615f8fd9 3475 bnx2x_int_enable(bp);
eb8da205
EG
3476
3477 /* Check for SPIO5 */
3478 bnx2x_attn_int_deasserted0(bp,
3479 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3480 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
3481}
3482
3483/* end of nic init */
3484
3485/*
3486 * gzip service functions
3487 */
3488
3489static int bnx2x_gunzip_init(struct bnx2x *bp)
3490{
1a983142
FT
3491 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3492 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
3493 if (bp->gunzip_buf == NULL)
3494 goto gunzip_nomem1;
3495
3496 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3497 if (bp->strm == NULL)
3498 goto gunzip_nomem2;
3499
3500 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3501 GFP_KERNEL);
3502 if (bp->strm->workspace == NULL)
3503 goto gunzip_nomem3;
3504
3505 return 0;
3506
3507gunzip_nomem3:
3508 kfree(bp->strm);
3509 bp->strm = NULL;
3510
3511gunzip_nomem2:
1a983142
FT
3512 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3513 bp->gunzip_mapping);
a2fbb9ea
ET
3514 bp->gunzip_buf = NULL;
3515
3516gunzip_nomem1:
cdaa7cb8
VZ
3517 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3518 " un-compression\n");
a2fbb9ea
ET
3519 return -ENOMEM;
3520}
3521
3522static void bnx2x_gunzip_end(struct bnx2x *bp)
3523{
3524 kfree(bp->strm->workspace);
3525
3526 kfree(bp->strm);
3527 bp->strm = NULL;
3528
3529 if (bp->gunzip_buf) {
1a983142
FT
3530 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3531 bp->gunzip_mapping);
a2fbb9ea
ET
3532 bp->gunzip_buf = NULL;
3533 }
3534}
3535
94a78b79 3536static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
3537{
3538 int n, rc;
3539
3540 /* check gzip header */
94a78b79
VZ
3541 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3542 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 3543 return -EINVAL;
94a78b79 3544 }
a2fbb9ea
ET
3545
3546 n = 10;
3547
34f80b04 3548#define FNAME 0x8
a2fbb9ea
ET
3549
3550 if (zbuf[3] & FNAME)
3551 while ((zbuf[n++] != 0) && (n < len));
3552
94a78b79 3553 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
3554 bp->strm->avail_in = len - n;
3555 bp->strm->next_out = bp->gunzip_buf;
3556 bp->strm->avail_out = FW_BUF_SIZE;
3557
3558 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3559 if (rc != Z_OK)
3560 return rc;
3561
3562 rc = zlib_inflate(bp->strm, Z_FINISH);
3563 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
3564 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3565 bp->strm->msg);
a2fbb9ea
ET
3566
3567 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3568 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
3569 netdev_err(bp->dev, "Firmware decompression error:"
3570 " gunzip_outlen (%d) not aligned\n",
3571 bp->gunzip_outlen);
a2fbb9ea
ET
3572 bp->gunzip_outlen >>= 2;
3573
3574 zlib_inflateEnd(bp->strm);
3575
3576 if (rc == Z_STREAM_END)
3577 return 0;
3578
3579 return rc;
3580}
3581
3582/* nic load/unload */
3583
3584/*
34f80b04 3585 * General service functions
a2fbb9ea
ET
3586 */
3587
3588/* send a NIG loopback debug packet */
3589static void bnx2x_lb_pckt(struct bnx2x *bp)
3590{
a2fbb9ea 3591 u32 wb_write[3];
a2fbb9ea
ET
3592
3593 /* Ethernet source and destination addresses */
a2fbb9ea
ET
3594 wb_write[0] = 0x55555555;
3595 wb_write[1] = 0x55555555;
34f80b04 3596 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 3597 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3598
3599 /* NON-IP protocol */
a2fbb9ea
ET
3600 wb_write[0] = 0x09000000;
3601 wb_write[1] = 0x55555555;
34f80b04 3602 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 3603 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3604}
3605
3606/* some of the internal memories
3607 * are not directly readable from the driver
3608 * to test them we send debug packets
3609 */
3610static int bnx2x_int_mem_test(struct bnx2x *bp)
3611{
3612 int factor;
3613 int count, i;
3614 u32 val = 0;
3615
ad8d3948 3616 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 3617 factor = 120;
ad8d3948
EG
3618 else if (CHIP_REV_IS_EMUL(bp))
3619 factor = 200;
3620 else
a2fbb9ea 3621 factor = 1;
a2fbb9ea
ET
3622
3623 DP(NETIF_MSG_HW, "start part1\n");
3624
3625 /* Disable inputs of parser neighbor blocks */
3626 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3627 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3628 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3629 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3630
3631 /* Write 0 to parser credits for CFC search request */
3632 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3633
3634 /* send Ethernet packet */
3635 bnx2x_lb_pckt(bp);
3636
3637 /* TODO do i reset NIG statistic? */
3638 /* Wait until NIG register shows 1 packet of size 0x10 */
3639 count = 1000 * factor;
3640 while (count) {
34f80b04 3641
a2fbb9ea
ET
3642 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3643 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3644 if (val == 0x10)
3645 break;
3646
3647 msleep(10);
3648 count--;
3649 }
3650 if (val != 0x10) {
3651 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3652 return -1;
3653 }
3654
3655 /* Wait until PRS register shows 1 packet */
3656 count = 1000 * factor;
3657 while (count) {
3658 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
3659 if (val == 1)
3660 break;
3661
3662 msleep(10);
3663 count--;
3664 }
3665 if (val != 0x1) {
3666 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3667 return -2;
3668 }
3669
3670 /* Reset and init BRB, PRS */
34f80b04 3671 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 3672 msleep(50);
34f80b04 3673 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 3674 msleep(50);
94a78b79
VZ
3675 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3676 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
3677
3678 DP(NETIF_MSG_HW, "part2\n");
3679
3680 /* Disable inputs of parser neighbor blocks */
3681 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3682 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3683 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3684 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3685
3686 /* Write 0 to parser credits for CFC search request */
3687 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3688
3689 /* send 10 Ethernet packets */
3690 for (i = 0; i < 10; i++)
3691 bnx2x_lb_pckt(bp);
3692
3693 /* Wait until NIG register shows 10 + 1
3694 packets of size 11*0x10 = 0xb0 */
3695 count = 1000 * factor;
3696 while (count) {
34f80b04 3697
a2fbb9ea
ET
3698 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3699 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3700 if (val == 0xb0)
3701 break;
3702
3703 msleep(10);
3704 count--;
3705 }
3706 if (val != 0xb0) {
3707 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3708 return -3;
3709 }
3710
3711 /* Wait until PRS register shows 2 packets */
3712 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3713 if (val != 2)
3714 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3715
3716 /* Write 1 to parser credits for CFC search request */
3717 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3718
3719 /* Wait until PRS register shows 3 packets */
3720 msleep(10 * factor);
3721 /* Wait until NIG register shows 1 packet of size 0x10 */
3722 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3723 if (val != 3)
3724 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3725
3726 /* clear NIG EOP FIFO */
3727 for (i = 0; i < 11; i++)
3728 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3729 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3730 if (val != 1) {
3731 BNX2X_ERR("clear of NIG failed\n");
3732 return -4;
3733 }
3734
3735 /* Reset and init BRB, PRS, NIG */
3736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3737 msleep(50);
3738 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3739 msleep(50);
94a78b79
VZ
3740 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3741 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 3742#ifndef BCM_CNIC
a2fbb9ea
ET
3743 /* set NIC mode */
3744 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3745#endif
3746
3747 /* Enable inputs of parser neighbor blocks */
3748 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3749 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3750 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 3751 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
3752
3753 DP(NETIF_MSG_HW, "done\n");
3754
3755 return 0; /* OK */
3756}
3757
3758static void enable_blocks_attention(struct bnx2x *bp)
3759{
3760 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3761 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3762 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3763 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3764 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3765 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3766 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3767 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3768 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
3769/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3770/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3771 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3772 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3773 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
3774/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3775/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3776 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3777 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3778 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3779 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
3780/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3781/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3782 if (CHIP_REV_IS_FPGA(bp))
3783 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3784 else
3785 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
3786 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3787 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3788 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
3789/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3790/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3791 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3792 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
3793/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3794 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
3795}
3796
72fd0718
VZ
3797static const struct {
3798 u32 addr;
3799 u32 mask;
3800} bnx2x_parity_mask[] = {
3801 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3802 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3803 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3804 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3805 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3806 {QM_REG_QM_PRTY_MASK, 0x0},
3807 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3808 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3809 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3810 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3811 {CDU_REG_CDU_PRTY_MASK, 0x0},
3812 {CFC_REG_CFC_PRTY_MASK, 0x0},
3813 {DBG_REG_DBG_PRTY_MASK, 0x0},
3814 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3815 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3816 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3817 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3818 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3819 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3820 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3821 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3822 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3823 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3824 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3825 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3826 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3827 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3828 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3829};
3830
3831static void enable_blocks_parity(struct bnx2x *bp)
3832{
cbd9da7b 3833 int i;
72fd0718 3834
cbd9da7b 3835 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
72fd0718
VZ
3836 REG_WR(bp, bnx2x_parity_mask[i].addr,
3837 bnx2x_parity_mask[i].mask);
3838}
3839
34f80b04 3840
81f75bbf
EG
3841static void bnx2x_reset_common(struct bnx2x *bp)
3842{
3843 /* reset_common */
3844 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3845 0xd3ffff7f);
3846 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3847}
3848
573f2035
EG
3849static void bnx2x_init_pxp(struct bnx2x *bp)
3850{
3851 u16 devctl;
3852 int r_order, w_order;
3853
3854 pci_read_config_word(bp->pdev,
3855 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3856 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3857 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3858 if (bp->mrrs == -1)
3859 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3860 else {
3861 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3862 r_order = bp->mrrs;
3863 }
3864
3865 bnx2x_init_pxp_arb(bp, r_order, w_order);
3866}
fd4ef40d
EG
3867
3868static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3869{
2145a920 3870 int is_required;
fd4ef40d 3871 u32 val;
2145a920 3872 int port;
fd4ef40d 3873
2145a920
VZ
3874 if (BP_NOMCP(bp))
3875 return;
3876
3877 is_required = 0;
fd4ef40d
EG
3878 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3879 SHARED_HW_CFG_FAN_FAILURE_MASK;
3880
3881 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3882 is_required = 1;
3883
3884 /*
3885 * The fan failure mechanism is usually related to the PHY type since
3886 * the power consumption of the board is affected by the PHY. Currently,
3887 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3888 */
3889 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3890 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 3891 is_required |=
d90d96ba
YR
3892 bnx2x_fan_failure_det_req(
3893 bp,
3894 bp->common.shmem_base,
a22f0788 3895 bp->common.shmem2_base,
d90d96ba 3896 port);
fd4ef40d
EG
3897 }
3898
3899 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3900
3901 if (is_required == 0)
3902 return;
3903
3904 /* Fan failure is indicated by SPIO 5 */
3905 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3906 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3907
3908 /* set to active low mode */
3909 val = REG_RD(bp, MISC_REG_SPIO_INT);
3910 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 3911 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
3912 REG_WR(bp, MISC_REG_SPIO_INT, val);
3913
3914 /* enable interrupt to signal the IGU */
3915 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3916 val |= (1 << MISC_REGISTERS_SPIO_5);
3917 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3918}
3919
34f80b04 3920static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 3921{
a2fbb9ea 3922 u32 val, i;
37b091ba
MC
3923#ifdef BCM_CNIC
3924 u32 wb_write[2];
3925#endif
a2fbb9ea 3926
34f80b04 3927 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 3928
81f75bbf 3929 bnx2x_reset_common(bp);
34f80b04
EG
3930 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 3932
94a78b79 3933 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
3934 if (CHIP_IS_E1H(bp))
3935 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 3936
34f80b04
EG
3937 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3938 msleep(30);
3939 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 3940
94a78b79 3941 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
3942 if (CHIP_IS_E1(bp)) {
3943 /* enable HW interrupt from PXP on USDM overflow
3944 bit 16 on INT_MASK_0 */
3945 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3946 }
a2fbb9ea 3947
94a78b79 3948 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 3949 bnx2x_init_pxp(bp);
a2fbb9ea
ET
3950
3951#ifdef __BIG_ENDIAN
34f80b04
EG
3952 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3953 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3954 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3955 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3956 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
3957 /* make sure this value is 0 */
3958 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
3959
3960/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3961 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3962 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3963 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3964 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
3965#endif
3966
34f80b04 3967 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 3968#ifdef BCM_CNIC
34f80b04
EG
3969 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3970 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3971 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
3972#endif
3973
34f80b04
EG
3974 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3975 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 3976
34f80b04
EG
3977 /* let the HW do it's magic ... */
3978 msleep(100);
3979 /* finish PXP init */
3980 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3981 if (val != 1) {
3982 BNX2X_ERR("PXP2 CFG failed\n");
3983 return -EBUSY;
3984 }
3985 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3986 if (val != 1) {
3987 BNX2X_ERR("PXP2 RD_INIT failed\n");
3988 return -EBUSY;
3989 }
a2fbb9ea 3990
34f80b04
EG
3991 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3992 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 3993
94a78b79 3994 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 3995
34f80b04
EG
3996 /* clean the DMAE memory */
3997 bp->dmae_ready = 1;
3998 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 3999
94a78b79
VZ
4000 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4001 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4002 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4003 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 4004
34f80b04
EG
4005 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4006 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4007 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4008 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4009
94a78b79 4010 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
4011
4012#ifdef BCM_CNIC
4013 wb_write[0] = 0;
4014 wb_write[1] = 0;
4015 for (i = 0; i < 64; i++) {
4016 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
4017 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
4018
4019 if (CHIP_IS_E1H(bp)) {
4020 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4021 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4022 wb_write, 2);
4023 }
4024 }
4025#endif
34f80b04
EG
4026 /* soft reset pulse */
4027 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4028 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 4029
37b091ba 4030#ifdef BCM_CNIC
94a78b79 4031 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 4032#endif
a2fbb9ea 4033
94a78b79 4034 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
4035 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4036 if (!CHIP_REV_IS_SLOW(bp)) {
4037 /* enable hw interrupt from doorbell Q */
4038 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4039 }
a2fbb9ea 4040
94a78b79
VZ
4041 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4042 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 4043 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 4044#ifndef BCM_CNIC
3196a88a
EG
4045 /* set NIC mode */
4046 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 4047#endif
34f80b04
EG
4048 if (CHIP_IS_E1H(bp))
4049 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 4050
94a78b79
VZ
4051 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4052 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4054 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 4055
ca00392c
EG
4056 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4057 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4058 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4059 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 4060
94a78b79
VZ
4061 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4062 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4063 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4064 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 4065
34f80b04
EG
4066 /* sync semi rtc */
4067 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4068 0x80000000);
4069 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4070 0x80000000);
a2fbb9ea 4071
94a78b79
VZ
4072 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4073 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4074 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 4075
34f80b04 4076 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
4077 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4078 REG_WR(bp, i, random32());
94a78b79 4079 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
4080#ifdef BCM_CNIC
4081 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4082 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4083 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4084 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4085 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4086 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4087 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4088 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4089 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4090 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4091#endif
34f80b04 4092 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 4093
34f80b04
EG
4094 if (sizeof(union cdu_context) != 1024)
4095 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
4096 dev_alert(&bp->pdev->dev, "please adjust the size "
4097 "of cdu_context(%ld)\n",
7995c64e 4098 (long)sizeof(union cdu_context));
a2fbb9ea 4099
94a78b79 4100 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
4101 val = (4 << 24) + (0 << 12) + 1024;
4102 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 4103
94a78b79 4104 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 4105 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
4106 /* enable context validation interrupt from CFC */
4107 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4108
4109 /* set the thresholds to prevent CFC/CDU race */
4110 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 4111
94a78b79
VZ
4112 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4113 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 4114
94a78b79 4115 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
4116 /* Reset PCIE errors for debug */
4117 REG_WR(bp, 0x2814, 0xffffffff);
4118 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 4119
94a78b79 4120 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 4121 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 4122 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 4123 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 4124
94a78b79 4125 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
4126 if (CHIP_IS_E1H(bp)) {
4127 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4128 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4129 }
4130
4131 if (CHIP_REV_IS_SLOW(bp))
4132 msleep(200);
4133
4134 /* finish CFC init */
4135 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4136 if (val != 1) {
4137 BNX2X_ERR("CFC LL_INIT failed\n");
4138 return -EBUSY;
4139 }
4140 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4141 if (val != 1) {
4142 BNX2X_ERR("CFC AC_INIT failed\n");
4143 return -EBUSY;
4144 }
4145 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4146 if (val != 1) {
4147 BNX2X_ERR("CFC CAM_INIT failed\n");
4148 return -EBUSY;
4149 }
4150 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 4151
34f80b04
EG
4152 /* read NIG statistic
4153 to see if this is our first up since powerup */
4154 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4155 val = *bnx2x_sp(bp, wb_data[0]);
4156
4157 /* do internal memory self test */
4158 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4159 BNX2X_ERR("internal mem self test failed\n");
4160 return -EBUSY;
4161 }
4162
d90d96ba 4163 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
4164 bp->common.shmem_base,
4165 bp->common.shmem2_base);
f1410647 4166
fd4ef40d
EG
4167 bnx2x_setup_fan_failure_detection(bp);
4168
34f80b04
EG
4169 /* clear PXP2 attentions */
4170 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 4171
34f80b04 4172 enable_blocks_attention(bp);
72fd0718
VZ
4173 if (CHIP_PARITY_SUPPORTED(bp))
4174 enable_blocks_parity(bp);
a2fbb9ea 4175
6bbca910
YR
4176 if (!BP_NOMCP(bp)) {
4177 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
4178 bnx2x_common_init_phy(bp, bp->common.shmem_base,
4179 bp->common.shmem2_base);
6bbca910
YR
4180 bnx2x_release_phy_lock(bp);
4181 } else
4182 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4183
34f80b04
EG
4184 return 0;
4185}
a2fbb9ea 4186
34f80b04
EG
4187static int bnx2x_init_port(struct bnx2x *bp)
4188{
4189 int port = BP_PORT(bp);
94a78b79 4190 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 4191 u32 low, high;
34f80b04 4192 u32 val;
a2fbb9ea 4193
cdaa7cb8 4194 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
4195
4196 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 4197
94a78b79 4198 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 4199 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
4200
4201 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4202 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4203 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 4204 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 4205
37b091ba
MC
4206#ifdef BCM_CNIC
4207 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 4208
94a78b79 4209 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
4210 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4211 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 4212#endif
cdaa7cb8 4213
94a78b79 4214 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 4215
94a78b79 4216 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
4217 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4218 /* no pause for emulation and FPGA */
4219 low = 0;
4220 high = 513;
4221 } else {
4222 if (IS_E1HMF(bp))
4223 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4224 else if (bp->dev->mtu > 4096) {
4225 if (bp->flags & ONE_PORT_FLAG)
4226 low = 160;
4227 else {
4228 val = bp->dev->mtu;
4229 /* (24*1024 + val*4)/256 */
4230 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4231 }
4232 } else
4233 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4234 high = low + 56; /* 14*1024/256 */
4235 }
4236 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4237 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4238
4239
94a78b79 4240 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 4241
94a78b79 4242 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 4243 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 4244 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 4245 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 4246
94a78b79
VZ
4247 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4248 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4249 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4250 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 4251
94a78b79 4252 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 4253 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 4254
94a78b79 4255 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
4256
4257 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 4258 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
4259
4260 /* update threshold */
34f80b04 4261 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 4262 /* update init credit */
34f80b04 4263 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
4264
4265 /* probe changes */
34f80b04 4266 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 4267 msleep(5);
34f80b04 4268 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 4269
37b091ba
MC
4270#ifdef BCM_CNIC
4271 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 4272#endif
94a78b79 4273 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 4274 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
4275
4276 if (CHIP_IS_E1(bp)) {
4277 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4278 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4279 }
94a78b79 4280 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 4281
94a78b79 4282 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
4283 /* init aeu_mask_attn_func_0/1:
4284 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4285 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4286 * bits 4-7 are used for "per vn group attention" */
4287 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4288 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4289
94a78b79 4290 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 4291 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 4292 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 4293 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 4294 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 4295
94a78b79 4296 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
4297
4298 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4299
4300 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
4301 /* 0x2 disable e1hov, 0x1 enable */
4302 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4303 (IS_E1HMF(bp) ? 0x1 : 0x2));
4304
1c06328c
EG
4305 {
4306 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4307 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4308 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4309 }
34f80b04
EG
4310 }
4311
94a78b79 4312 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 4313 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 4314 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
4315 bp->common.shmem_base,
4316 bp->common.shmem2_base);
d90d96ba 4317 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 4318 bp->common.shmem2_base, port)) {
4d295db0
EG
4319 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4320 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4321 val = REG_RD(bp, reg_addr);
f1410647 4322 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 4323 REG_WR(bp, reg_addr, val);
f1410647 4324 }
c18487ee 4325 bnx2x__link_reset(bp);
a2fbb9ea 4326
34f80b04
EG
4327 return 0;
4328}
4329
4330#define ILT_PER_FUNC (768/2)
4331#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4332/* the phys address is shifted right 12 bits and has an added
4333 1=valid bit added to the 53rd bit
4334 then since this is a wide register(TM)
4335 we split it into two 32 bit writes
4336 */
4337#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4338#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4339#define PXP_ONE_ILT(x) (((x) << 10) | x)
4340#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4341
37b091ba
MC
4342#ifdef BCM_CNIC
4343#define CNIC_ILT_LINES 127
4344#define CNIC_CTX_PER_ILT 16
4345#else
34f80b04 4346#define CNIC_ILT_LINES 0
37b091ba 4347#endif
34f80b04
EG
4348
4349static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4350{
4351 int reg;
4352
4353 if (CHIP_IS_E1H(bp))
4354 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4355 else /* E1 */
4356 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4357
4358 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4359}
4360
4361static int bnx2x_init_func(struct bnx2x *bp)
4362{
4363 int port = BP_PORT(bp);
4364 int func = BP_FUNC(bp);
8badd27a 4365 u32 addr, val;
34f80b04
EG
4366 int i;
4367
cdaa7cb8 4368 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 4369
8badd27a
EG
4370 /* set MSI reconfigure capability */
4371 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4372 val = REG_RD(bp, addr);
4373 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4374 REG_WR(bp, addr, val);
4375
34f80b04
EG
4376 i = FUNC_ILT_BASE(func);
4377
4378 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4379 if (CHIP_IS_E1H(bp)) {
4380 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4381 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4382 } else /* E1 */
4383 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4384 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4385
37b091ba
MC
4386#ifdef BCM_CNIC
4387 i += 1 + CNIC_ILT_LINES;
4388 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4389 if (CHIP_IS_E1(bp))
4390 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4391 else {
4392 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4393 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4394 }
4395
4396 i++;
4397 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4398 if (CHIP_IS_E1(bp))
4399 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4400 else {
4401 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4402 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4403 }
4404
4405 i++;
4406 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4407 if (CHIP_IS_E1(bp))
4408 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4409 else {
4410 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4411 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4412 }
4413
4414 /* tell the searcher where the T2 table is */
4415 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4416
4417 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4418 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4419
4420 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4421 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4422 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4423
4424 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4425#endif
34f80b04
EG
4426
4427 if (CHIP_IS_E1H(bp)) {
573f2035
EG
4428 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4429 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4430 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4431 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4432 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4433 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4434 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4435 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4436 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
4437
4438 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4439 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4440 }
4441
4442 /* HC init per function */
4443 if (CHIP_IS_E1H(bp)) {
4444 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4445
4446 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4447 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4448 }
94a78b79 4449 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 4450
c14423fe 4451 /* Reset PCIE errors for debug */
a2fbb9ea
ET
4452 REG_WR(bp, 0x2114, 0xffffffff);
4453 REG_WR(bp, 0x2120, 0xffffffff);
b7737c9b 4454 bnx2x_phy_probe(&bp->link_params);
34f80b04
EG
4455 return 0;
4456}
4457
9f6c9258 4458int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04
EG
4459{
4460 int i, rc = 0;
a2fbb9ea 4461
34f80b04
EG
4462 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4463 BP_FUNC(bp), load_code);
a2fbb9ea 4464
34f80b04
EG
4465 bp->dmae_ready = 0;
4466 mutex_init(&bp->dmae_mutex);
54016b26
EG
4467 rc = bnx2x_gunzip_init(bp);
4468 if (rc)
4469 return rc;
a2fbb9ea 4470
34f80b04
EG
4471 switch (load_code) {
4472 case FW_MSG_CODE_DRV_LOAD_COMMON:
4473 rc = bnx2x_init_common(bp);
4474 if (rc)
4475 goto init_hw_err;
4476 /* no break */
4477
4478 case FW_MSG_CODE_DRV_LOAD_PORT:
4479 bp->dmae_ready = 1;
4480 rc = bnx2x_init_port(bp);
4481 if (rc)
4482 goto init_hw_err;
4483 /* no break */
4484
4485 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4486 bp->dmae_ready = 1;
4487 rc = bnx2x_init_func(bp);
4488 if (rc)
4489 goto init_hw_err;
4490 break;
4491
4492 default:
4493 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4494 break;
4495 }
4496
4497 if (!BP_NOMCP(bp)) {
4498 int func = BP_FUNC(bp);
a2fbb9ea
ET
4499
4500 bp->fw_drv_pulse_wr_seq =
34f80b04 4501 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 4502 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
4503 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4504 }
a2fbb9ea 4505
34f80b04
EG
4506 /* this needs to be done before gunzip end */
4507 bnx2x_zero_def_sb(bp);
4508 for_each_queue(bp, i)
4509 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
4510#ifdef BCM_CNIC
4511 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4512#endif
34f80b04
EG
4513
4514init_hw_err:
4515 bnx2x_gunzip_end(bp);
4516
4517 return rc;
a2fbb9ea
ET
4518}
4519
9f6c9258 4520void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
4521{
4522
4523#define BNX2X_PCI_FREE(x, y, size) \
4524 do { \
4525 if (x) { \
1a983142 4526 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
4527 x = NULL; \
4528 y = 0; \
4529 } \
4530 } while (0)
4531
4532#define BNX2X_FREE(x) \
4533 do { \
4534 if (x) { \
4535 vfree(x); \
4536 x = NULL; \
4537 } \
4538 } while (0)
4539
4540 int i;
4541
4542 /* fastpath */
555f6c78 4543 /* Common */
a2fbb9ea
ET
4544 for_each_queue(bp, i) {
4545
555f6c78 4546 /* status blocks */
a2fbb9ea
ET
4547 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4548 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 4549 sizeof(struct host_status_block));
555f6c78
EG
4550 }
4551 /* Rx */
54b9ddaa 4552 for_each_queue(bp, i) {
a2fbb9ea 4553
555f6c78 4554 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
4555 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4556 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4557 bnx2x_fp(bp, i, rx_desc_mapping),
4558 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4559
4560 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4561 bnx2x_fp(bp, i, rx_comp_mapping),
4562 sizeof(struct eth_fast_path_rx_cqe) *
4563 NUM_RCQ_BD);
a2fbb9ea 4564
7a9b2557 4565 /* SGE ring */
32626230 4566 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
4567 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4568 bnx2x_fp(bp, i, rx_sge_mapping),
4569 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4570 }
555f6c78 4571 /* Tx */
54b9ddaa 4572 for_each_queue(bp, i) {
555f6c78
EG
4573
4574 /* fastpath tx rings: tx_buf tx_desc */
4575 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4576 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4577 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 4578 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 4579 }
a2fbb9ea
ET
4580 /* end of fastpath */
4581
4582 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 4583 sizeof(struct host_def_status_block));
a2fbb9ea
ET
4584
4585 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 4586 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4587
37b091ba 4588#ifdef BCM_CNIC
a2fbb9ea
ET
4589 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4590 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4591 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4592 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
4593 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4594 sizeof(struct host_status_block));
a2fbb9ea 4595#endif
7a9b2557 4596 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
4597
4598#undef BNX2X_PCI_FREE
4599#undef BNX2X_KFREE
4600}
4601
9f6c9258 4602int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea
ET
4603{
4604
4605#define BNX2X_PCI_ALLOC(x, y, size) \
4606 do { \
1a983142 4607 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
4608 if (x == NULL) \
4609 goto alloc_mem_err; \
4610 memset(x, 0, size); \
4611 } while (0)
a2fbb9ea 4612
9f6c9258
DK
4613#define BNX2X_ALLOC(x, size) \
4614 do { \
4615 x = vmalloc(size); \
4616 if (x == NULL) \
4617 goto alloc_mem_err; \
4618 memset(x, 0, size); \
4619 } while (0)
a2fbb9ea 4620
9f6c9258 4621 int i;
a2fbb9ea 4622
9f6c9258
DK
4623 /* fastpath */
4624 /* Common */
a2fbb9ea 4625 for_each_queue(bp, i) {
9f6c9258 4626 bnx2x_fp(bp, i, bp) = bp;
a2fbb9ea 4627
9f6c9258
DK
4628 /* status blocks */
4629 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4630 &bnx2x_fp(bp, i, status_blk_mapping),
4631 sizeof(struct host_status_block));
a2fbb9ea 4632 }
9f6c9258
DK
4633 /* Rx */
4634 for_each_queue(bp, i) {
a2fbb9ea 4635
9f6c9258
DK
4636 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4637 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4638 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4639 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4640 &bnx2x_fp(bp, i, rx_desc_mapping),
4641 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 4642
9f6c9258
DK
4643 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4644 &bnx2x_fp(bp, i, rx_comp_mapping),
4645 sizeof(struct eth_fast_path_rx_cqe) *
4646 NUM_RCQ_BD);
a2fbb9ea 4647
9f6c9258
DK
4648 /* SGE ring */
4649 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4650 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4651 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4652 &bnx2x_fp(bp, i, rx_sge_mapping),
4653 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4654 }
4655 /* Tx */
4656 for_each_queue(bp, i) {
8badd27a 4657
9f6c9258
DK
4658 /* fastpath tx rings: tx_buf tx_desc */
4659 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4660 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4661 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4662 &bnx2x_fp(bp, i, tx_desc_mapping),
4663 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 4664 }
9f6c9258 4665 /* end of fastpath */
8badd27a 4666
9f6c9258
DK
4667 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4668 sizeof(struct host_def_status_block));
8badd27a 4669
9f6c9258
DK
4670 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4671 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4672
9f6c9258
DK
4673#ifdef BCM_CNIC
4674 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
8badd27a 4675
9f6c9258
DK
4676 /* allocate searcher T2 table
4677 we allocate 1/4 of alloc num for T2
4678 (which is not entered into the ILT) */
4679 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
a2fbb9ea 4680
9f6c9258
DK
4681 /* Initialize T2 (for 1024 connections) */
4682 for (i = 0; i < 16*1024; i += 64)
4683 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 4684
9f6c9258
DK
4685 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4686 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
65abd74d 4687
9f6c9258
DK
4688 /* QM queues (128*MAX_CONN) */
4689 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
65abd74d 4690
9f6c9258
DK
4691 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4692 sizeof(struct host_status_block));
4693#endif
65abd74d 4694
9f6c9258
DK
4695 /* Slow path ring */
4696 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 4697
9f6c9258 4698 return 0;
e1510706 4699
9f6c9258
DK
4700alloc_mem_err:
4701 bnx2x_free_mem(bp);
4702 return -ENOMEM;
e1510706 4703
9f6c9258
DK
4704#undef BNX2X_PCI_ALLOC
4705#undef BNX2X_ALLOC
65abd74d
YG
4706}
4707
65abd74d 4708
a2fbb9ea
ET
4709/*
4710 * Init service functions
4711 */
4712
e665bfda
MC
4713/**
4714 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4715 *
4716 * @param bp driver descriptor
4717 * @param set set or clear an entry (1 or 0)
4718 * @param mac pointer to a buffer containing a MAC
4719 * @param cl_bit_vec bit vector of clients to register a MAC for
4720 * @param cam_offset offset in a CAM to use
4721 * @param with_bcast set broadcast MAC as well
4722 */
4723static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4724 u32 cl_bit_vec, u8 cam_offset,
4725 u8 with_bcast)
a2fbb9ea
ET
4726{
4727 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 4728 int port = BP_PORT(bp);
a2fbb9ea
ET
4729
4730 /* CAM allocation
4731 * unicasts 0-31:port0 32-63:port1
4732 * multicast 64-127:port0 128-191:port1
4733 */
e665bfda
MC
4734 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4735 config->hdr.offset = cam_offset;
4736 config->hdr.client_id = 0xff;
a2fbb9ea
ET
4737 config->hdr.reserved1 = 0;
4738
4739 /* primary MAC */
4740 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 4741 swab16(*(u16 *)&mac[0]);
a2fbb9ea 4742 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 4743 swab16(*(u16 *)&mac[2]);
a2fbb9ea 4744 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 4745 swab16(*(u16 *)&mac[4]);
34f80b04 4746 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
4747 if (set)
4748 config->config_table[0].target_table_entry.flags = 0;
4749 else
4750 CAM_INVALIDATE(config->config_table[0]);
ca00392c 4751 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 4752 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
4753 config->config_table[0].target_table_entry.vlan_id = 0;
4754
3101c2bc
YG
4755 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4756 (set ? "setting" : "clearing"),
a2fbb9ea
ET
4757 config->config_table[0].cam_entry.msb_mac_addr,
4758 config->config_table[0].cam_entry.middle_mac_addr,
4759 config->config_table[0].cam_entry.lsb_mac_addr);
4760
4761 /* broadcast */
e665bfda
MC
4762 if (with_bcast) {
4763 config->config_table[1].cam_entry.msb_mac_addr =
4764 cpu_to_le16(0xffff);
4765 config->config_table[1].cam_entry.middle_mac_addr =
4766 cpu_to_le16(0xffff);
4767 config->config_table[1].cam_entry.lsb_mac_addr =
4768 cpu_to_le16(0xffff);
4769 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4770 if (set)
4771 config->config_table[1].target_table_entry.flags =
4772 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4773 else
4774 CAM_INVALIDATE(config->config_table[1]);
4775 config->config_table[1].target_table_entry.clients_bit_vector =
4776 cpu_to_le32(cl_bit_vec);
4777 config->config_table[1].target_table_entry.vlan_id = 0;
4778 }
a2fbb9ea
ET
4779
4780 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4781 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4782 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4783}
4784
e665bfda
MC
4785/**
4786 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4787 *
4788 * @param bp driver descriptor
4789 * @param set set or clear an entry (1 or 0)
4790 * @param mac pointer to a buffer containing a MAC
4791 * @param cl_bit_vec bit vector of clients to register a MAC for
4792 * @param cam_offset offset in a CAM to use
4793 */
4794static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4795 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
4796{
4797 struct mac_configuration_cmd_e1h *config =
4798 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4799
8d9c5f34 4800 config->hdr.length = 1;
e665bfda
MC
4801 config->hdr.offset = cam_offset;
4802 config->hdr.client_id = 0xff;
34f80b04
EG
4803 config->hdr.reserved1 = 0;
4804
4805 /* primary MAC */
4806 config->config_table[0].msb_mac_addr =
e665bfda 4807 swab16(*(u16 *)&mac[0]);
34f80b04 4808 config->config_table[0].middle_mac_addr =
e665bfda 4809 swab16(*(u16 *)&mac[2]);
34f80b04 4810 config->config_table[0].lsb_mac_addr =
e665bfda 4811 swab16(*(u16 *)&mac[4]);
ca00392c 4812 config->config_table[0].clients_bit_vector =
e665bfda 4813 cpu_to_le32(cl_bit_vec);
34f80b04
EG
4814 config->config_table[0].vlan_id = 0;
4815 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
4816 if (set)
4817 config->config_table[0].flags = BP_PORT(bp);
4818 else
4819 config->config_table[0].flags =
4820 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 4821
e665bfda 4822 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 4823 (set ? "setting" : "clearing"),
34f80b04
EG
4824 config->config_table[0].msb_mac_addr,
4825 config->config_table[0].middle_mac_addr,
e665bfda 4826 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
4827
4828 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4829 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4830 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4831}
4832
a2fbb9ea
ET
4833static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4834 int *state_p, int poll)
4835{
4836 /* can take a while if any port is running */
8b3a0f0b 4837 int cnt = 5000;
a2fbb9ea 4838
c14423fe
ET
4839 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4840 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
4841
4842 might_sleep();
34f80b04 4843 while (cnt--) {
a2fbb9ea
ET
4844 if (poll) {
4845 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
4846 /* if index is different from 0
4847 * the reply for some commands will
3101c2bc 4848 * be on the non default queue
a2fbb9ea
ET
4849 */
4850 if (idx)
4851 bnx2x_rx_int(&bp->fp[idx], 10);
4852 }
a2fbb9ea 4853
3101c2bc 4854 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
4855 if (*state_p == state) {
4856#ifdef BNX2X_STOP_ON_ERROR
4857 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4858#endif
a2fbb9ea 4859 return 0;
8b3a0f0b 4860 }
a2fbb9ea 4861
a2fbb9ea 4862 msleep(1);
e3553b29
EG
4863
4864 if (bp->panic)
4865 return -EIO;
a2fbb9ea
ET
4866 }
4867
a2fbb9ea 4868 /* timeout! */
49d66772
ET
4869 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4870 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
4871#ifdef BNX2X_STOP_ON_ERROR
4872 bnx2x_panic();
4873#endif
a2fbb9ea 4874
49d66772 4875 return -EBUSY;
a2fbb9ea
ET
4876}
4877
9f6c9258 4878void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
e665bfda
MC
4879{
4880 bp->set_mac_pending++;
4881 smp_wmb();
4882
4883 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4884 (1 << bp->fp->cl_id), BP_FUNC(bp));
4885
4886 /* Wait for a completion */
4887 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4888}
4889
9f6c9258 4890void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
e665bfda
MC
4891{
4892 bp->set_mac_pending++;
4893 smp_wmb();
4894
4895 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4896 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4897 1);
4898
4899 /* Wait for a completion */
4900 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4901}
4902
993ac7b5
MC
4903#ifdef BCM_CNIC
4904/**
4905 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
4906 * MAC(s). This function will wait until the ramdord completion
4907 * returns.
4908 *
4909 * @param bp driver handle
4910 * @param set set or clear the CAM entry
4911 *
4912 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
4913 */
9f6c9258 4914int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5
MC
4915{
4916 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4917
4918 bp->set_mac_pending++;
4919 smp_wmb();
4920
4921 /* Send a SET_MAC ramrod */
4922 if (CHIP_IS_E1(bp))
4923 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4924 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4925 1);
4926 else
4927 /* CAM allocation for E1H
4928 * unicasts: by func number
4929 * multicast: 20+FUNC*20, 20 each
4930 */
4931 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4932 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4933
4934 /* Wait for a completion when setting */
4935 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4936
4937 return 0;
4938}
4939#endif
4940
9f6c9258 4941int bnx2x_setup_leading(struct bnx2x *bp)
a2fbb9ea 4942{
34f80b04 4943 int rc;
a2fbb9ea 4944
c14423fe 4945 /* reset IGU state */
34f80b04 4946 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4947
4948 /* SETUP ramrod */
4949 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4950
34f80b04
EG
4951 /* Wait for completion */
4952 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 4953
34f80b04 4954 return rc;
a2fbb9ea
ET
4955}
4956
9f6c9258 4957int bnx2x_setup_multi(struct bnx2x *bp, int index)
a2fbb9ea 4958{
555f6c78
EG
4959 struct bnx2x_fastpath *fp = &bp->fp[index];
4960
a2fbb9ea 4961 /* reset IGU state */
555f6c78 4962 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 4963
228241eb 4964 /* SETUP ramrod */
555f6c78
EG
4965 fp->state = BNX2X_FP_STATE_OPENING;
4966 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4967 fp->cl_id, 0);
a2fbb9ea
ET
4968
4969 /* Wait for completion */
4970 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 4971 &(fp->state), 0);
a2fbb9ea
ET
4972}
4973
a2fbb9ea 4974
9f6c9258 4975void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 4976{
ca00392c
EG
4977
4978 switch (bp->multi_mode) {
4979 case ETH_RSS_MODE_DISABLED:
54b9ddaa 4980 bp->num_queues = 1;
ca00392c
EG
4981 break;
4982
4983 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
4984 if (num_queues)
4985 bp->num_queues = min_t(u32, num_queues,
4986 BNX2X_MAX_QUEUES(bp));
ca00392c 4987 else
54b9ddaa
VZ
4988 bp->num_queues = min_t(u32, num_online_cpus(),
4989 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
4990 break;
4991
4992
4993 default:
54b9ddaa 4994 bp->num_queues = 1;
9f6c9258
DK
4995 break;
4996 }
a2fbb9ea
ET
4997}
4998
9f6c9258
DK
4999
5000
a2fbb9ea
ET
5001static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5002{
555f6c78 5003 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
5004 int rc;
5005
c14423fe 5006 /* halt the connection */
555f6c78
EG
5007 fp->state = BNX2X_FP_STATE_HALTING;
5008 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 5009
34f80b04 5010 /* Wait for completion */
a2fbb9ea 5011 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 5012 &(fp->state), 1);
c14423fe 5013 if (rc) /* timeout */
a2fbb9ea
ET
5014 return rc;
5015
5016 /* delete cfc entry */
5017 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5018
34f80b04
EG
5019 /* Wait for completion */
5020 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 5021 &(fp->state), 1);
34f80b04 5022 return rc;
a2fbb9ea
ET
5023}
5024
da5a662a 5025static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 5026{
4781bfad 5027 __le16 dsb_sp_prod_idx;
c14423fe 5028 /* if the other port is handling traffic,
a2fbb9ea 5029 this can take a lot of time */
34f80b04
EG
5030 int cnt = 500;
5031 int rc;
a2fbb9ea
ET
5032
5033 might_sleep();
5034
5035 /* Send HALT ramrod */
5036 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 5037 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 5038
34f80b04
EG
5039 /* Wait for completion */
5040 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5041 &(bp->fp[0].state), 1);
5042 if (rc) /* timeout */
da5a662a 5043 return rc;
a2fbb9ea 5044
49d66772 5045 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 5046
228241eb 5047 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
5048 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5049
49d66772 5050 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
5051 we are going to reset the chip anyway
5052 so there is not much to do if this times out
5053 */
34f80b04 5054 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
5055 if (!cnt) {
5056 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5057 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5058 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5059#ifdef BNX2X_STOP_ON_ERROR
5060 bnx2x_panic();
5061#endif
36e552ab 5062 rc = -EBUSY;
34f80b04
EG
5063 break;
5064 }
5065 cnt--;
da5a662a 5066 msleep(1);
5650d9d4 5067 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
5068 }
5069 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5070 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
5071
5072 return rc;
a2fbb9ea
ET
5073}
5074
34f80b04
EG
5075static void bnx2x_reset_func(struct bnx2x *bp)
5076{
5077 int port = BP_PORT(bp);
5078 int func = BP_FUNC(bp);
5079 int base, i;
5080
5081 /* Configure IGU */
5082 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5083 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5084
37b091ba
MC
5085#ifdef BCM_CNIC
5086 /* Disable Timer scan */
5087 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5088 /*
5089 * Wait for at least 10ms and up to 2 second for the timers scan to
5090 * complete
5091 */
5092 for (i = 0; i < 200; i++) {
5093 msleep(10);
5094 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5095 break;
5096 }
5097#endif
34f80b04
EG
5098 /* Clear ILT */
5099 base = FUNC_ILT_BASE(func);
5100 for (i = base; i < base + ILT_PER_FUNC; i++)
5101 bnx2x_ilt_wr(bp, i, 0);
5102}
5103
5104static void bnx2x_reset_port(struct bnx2x *bp)
5105{
5106 int port = BP_PORT(bp);
5107 u32 val;
5108
5109 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5110
5111 /* Do not rcv packets to BRB */
5112 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5113 /* Do not direct rcv packets that are not for MCP to the BRB */
5114 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5115 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5116
5117 /* Configure AEU */
5118 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5119
5120 msleep(100);
5121 /* Check for BRB port occupancy */
5122 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5123 if (val)
5124 DP(NETIF_MSG_IFDOWN,
33471629 5125 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
5126
5127 /* TODO: Close Doorbell port? */
5128}
5129
34f80b04
EG
5130static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5131{
5132 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5133 BP_FUNC(bp), reset_code);
5134
5135 switch (reset_code) {
5136 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5137 bnx2x_reset_port(bp);
5138 bnx2x_reset_func(bp);
5139 bnx2x_reset_common(bp);
5140 break;
5141
5142 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5143 bnx2x_reset_port(bp);
5144 bnx2x_reset_func(bp);
5145 break;
5146
5147 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5148 bnx2x_reset_func(bp);
5149 break;
49d66772 5150
34f80b04
EG
5151 default:
5152 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5153 break;
5154 }
5155}
5156
9f6c9258 5157void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 5158{
da5a662a 5159 int port = BP_PORT(bp);
a2fbb9ea 5160 u32 reset_code = 0;
da5a662a 5161 int i, cnt, rc;
a2fbb9ea 5162
555f6c78 5163 /* Wait until tx fastpath tasks complete */
54b9ddaa 5164 for_each_queue(bp, i) {
228241eb
ET
5165 struct bnx2x_fastpath *fp = &bp->fp[i];
5166
34f80b04 5167 cnt = 1000;
e8b5fc51 5168 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 5169
7961f791 5170 bnx2x_tx_int(fp);
34f80b04
EG
5171 if (!cnt) {
5172 BNX2X_ERR("timeout waiting for queue[%d]\n",
5173 i);
5174#ifdef BNX2X_STOP_ON_ERROR
5175 bnx2x_panic();
5176 return -EBUSY;
5177#else
5178 break;
5179#endif
5180 }
5181 cnt--;
da5a662a 5182 msleep(1);
34f80b04 5183 }
228241eb 5184 }
da5a662a
VZ
5185 /* Give HW time to discard old tx messages */
5186 msleep(1);
a2fbb9ea 5187
3101c2bc
YG
5188 if (CHIP_IS_E1(bp)) {
5189 struct mac_configuration_cmd *config =
5190 bnx2x_sp(bp, mcast_config);
5191
e665bfda 5192 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 5193
8d9c5f34 5194 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
5195 CAM_INVALIDATE(config->config_table[i]);
5196
8d9c5f34 5197 config->hdr.length = i;
3101c2bc
YG
5198 if (CHIP_REV_IS_SLOW(bp))
5199 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5200 else
5201 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 5202 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
5203 config->hdr.reserved1 = 0;
5204
e665bfda
MC
5205 bp->set_mac_pending++;
5206 smp_wmb();
5207
3101c2bc
YG
5208 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5209 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5210 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5211
5212 } else { /* E1H */
65abd74d
YG
5213 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5214
e665bfda 5215 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
5216
5217 for (i = 0; i < MC_HASH_SIZE; i++)
5218 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
5219
5220 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 5221 }
993ac7b5
MC
5222#ifdef BCM_CNIC
5223 /* Clear iSCSI L2 MAC */
5224 mutex_lock(&bp->cnic_mutex);
5225 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5226 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5227 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5228 }
5229 mutex_unlock(&bp->cnic_mutex);
5230#endif
3101c2bc 5231
65abd74d
YG
5232 if (unload_mode == UNLOAD_NORMAL)
5233 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5234
7d0446c2 5235 else if (bp->flags & NO_WOL_FLAG)
65abd74d 5236 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 5237
7d0446c2 5238 else if (bp->wol) {
65abd74d
YG
5239 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5240 u8 *mac_addr = bp->dev->dev_addr;
5241 u32 val;
5242 /* The mac address is written to entries 1-4 to
5243 preserve entry 0 which is used by the PMF */
5244 u8 entry = (BP_E1HVN(bp) + 1)*8;
5245
5246 val = (mac_addr[0] << 8) | mac_addr[1];
5247 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5248
5249 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5250 (mac_addr[4] << 8) | mac_addr[5];
5251 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5252
5253 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5254
5255 } else
5256 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5257
34f80b04
EG
5258 /* Close multi and leading connections
5259 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
5260 for_each_nondefault_queue(bp, i)
5261 if (bnx2x_stop_multi(bp, i))
228241eb 5262 goto unload_error;
a2fbb9ea 5263
da5a662a
VZ
5264 rc = bnx2x_stop_leading(bp);
5265 if (rc) {
34f80b04 5266 BNX2X_ERR("Stop leading failed!\n");
da5a662a 5267#ifdef BNX2X_STOP_ON_ERROR
34f80b04 5268 return -EBUSY;
da5a662a
VZ
5269#else
5270 goto unload_error;
34f80b04 5271#endif
228241eb
ET
5272 }
5273
5274unload_error:
34f80b04 5275 if (!BP_NOMCP(bp))
a22f0788 5276 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 5277 else {
f5372251 5278 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
5279 load_count[0], load_count[1], load_count[2]);
5280 load_count[0]--;
da5a662a 5281 load_count[1 + port]--;
f5372251 5282 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
5283 load_count[0], load_count[1], load_count[2]);
5284 if (load_count[0] == 0)
5285 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 5286 else if (load_count[1 + port] == 0)
34f80b04
EG
5287 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5288 else
5289 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5290 }
a2fbb9ea 5291
34f80b04
EG
5292 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5293 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5294 bnx2x__link_reset(bp);
a2fbb9ea
ET
5295
5296 /* Reset the chip */
228241eb 5297 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
5298
5299 /* Report UNLOAD_DONE to MCP */
34f80b04 5300 if (!BP_NOMCP(bp))
a22f0788 5301 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 5302
72fd0718
VZ
5303}
5304
9f6c9258 5305void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
5306{
5307 u32 val;
5308
5309 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5310
5311 if (CHIP_IS_E1(bp)) {
5312 int port = BP_PORT(bp);
5313 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5314 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5315
5316 val = REG_RD(bp, addr);
5317 val &= ~(0x300);
5318 REG_WR(bp, addr, val);
5319 } else if (CHIP_IS_E1H(bp)) {
5320 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5321 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5322 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5323 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5324 }
5325}
5326
72fd0718
VZ
5327
5328/* Close gates #2, #3 and #4: */
5329static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5330{
5331 u32 val, addr;
5332
5333 /* Gates #2 and #4a are closed/opened for "not E1" only */
5334 if (!CHIP_IS_E1(bp)) {
5335 /* #4 */
5336 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5337 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5338 close ? (val | 0x1) : (val & (~(u32)1)));
5339 /* #2 */
5340 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5341 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5342 close ? (val | 0x1) : (val & (~(u32)1)));
5343 }
5344
5345 /* #3 */
5346 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5347 val = REG_RD(bp, addr);
5348 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5349
5350 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5351 close ? "closing" : "opening");
5352 mmiowb();
5353}
5354
5355#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5356
5357static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5358{
5359 /* Do some magic... */
5360 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5361 *magic_val = val & SHARED_MF_CLP_MAGIC;
5362 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5363}
5364
5365/* Restore the value of the `magic' bit.
5366 *
5367 * @param pdev Device handle.
5368 * @param magic_val Old value of the `magic' bit.
5369 */
5370static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5371{
5372 /* Restore the `magic' bit value... */
5373 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5374 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5375 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5376 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5377 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5378 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5379}
5380
5381/* Prepares for MCP reset: takes care of CLP configurations.
5382 *
5383 * @param bp
5384 * @param magic_val Old value of 'magic' bit.
5385 */
5386static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5387{
5388 u32 shmem;
5389 u32 validity_offset;
5390
5391 DP(NETIF_MSG_HW, "Starting\n");
5392
5393 /* Set `magic' bit in order to save MF config */
5394 if (!CHIP_IS_E1(bp))
5395 bnx2x_clp_reset_prep(bp, magic_val);
5396
5397 /* Get shmem offset */
5398 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5399 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5400
5401 /* Clear validity map flags */
5402 if (shmem > 0)
5403 REG_WR(bp, shmem + validity_offset, 0);
5404}
5405
5406#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5407#define MCP_ONE_TIMEOUT 100 /* 100 ms */
5408
5409/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5410 * depending on the HW type.
5411 *
5412 * @param bp
5413 */
5414static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5415{
5416 /* special handling for emulation and FPGA,
5417 wait 10 times longer */
5418 if (CHIP_REV_IS_SLOW(bp))
5419 msleep(MCP_ONE_TIMEOUT*10);
5420 else
5421 msleep(MCP_ONE_TIMEOUT);
5422}
5423
5424static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5425{
5426 u32 shmem, cnt, validity_offset, val;
5427 int rc = 0;
5428
5429 msleep(100);
5430
5431 /* Get shmem offset */
5432 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5433 if (shmem == 0) {
5434 BNX2X_ERR("Shmem 0 return failure\n");
5435 rc = -ENOTTY;
5436 goto exit_lbl;
5437 }
5438
5439 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5440
5441 /* Wait for MCP to come up */
5442 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5443 /* TBD: its best to check validity map of last port.
5444 * currently checks on port 0.
5445 */
5446 val = REG_RD(bp, shmem + validity_offset);
5447 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5448 shmem + validity_offset, val);
5449
5450 /* check that shared memory is valid. */
5451 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5452 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5453 break;
5454
5455 bnx2x_mcp_wait_one(bp);
5456 }
5457
5458 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5459
5460 /* Check that shared memory is valid. This indicates that MCP is up. */
5461 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5462 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5463 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5464 rc = -ENOTTY;
5465 goto exit_lbl;
5466 }
5467
5468exit_lbl:
5469 /* Restore the `magic' bit value */
5470 if (!CHIP_IS_E1(bp))
5471 bnx2x_clp_reset_done(bp, magic_val);
5472
5473 return rc;
5474}
5475
5476static void bnx2x_pxp_prep(struct bnx2x *bp)
5477{
5478 if (!CHIP_IS_E1(bp)) {
5479 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5480 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5481 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5482 mmiowb();
5483 }
5484}
5485
5486/*
5487 * Reset the whole chip except for:
5488 * - PCIE core
5489 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5490 * one reset bit)
5491 * - IGU
5492 * - MISC (including AEU)
5493 * - GRC
5494 * - RBCN, RBCP
5495 */
5496static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5497{
5498 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5499
5500 not_reset_mask1 =
5501 MISC_REGISTERS_RESET_REG_1_RST_HC |
5502 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5503 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5504
5505 not_reset_mask2 =
5506 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5507 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5508 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5509 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5510 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5511 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5512 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5513 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5514
5515 reset_mask1 = 0xffffffff;
5516
5517 if (CHIP_IS_E1(bp))
5518 reset_mask2 = 0xffff;
5519 else
5520 reset_mask2 = 0x1ffff;
5521
5522 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5523 reset_mask1 & (~not_reset_mask1));
5524 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5525 reset_mask2 & (~not_reset_mask2));
5526
5527 barrier();
5528 mmiowb();
5529
5530 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5531 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5532 mmiowb();
5533}
5534
5535static int bnx2x_process_kill(struct bnx2x *bp)
5536{
5537 int cnt = 1000;
5538 u32 val = 0;
5539 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5540
5541
5542 /* Empty the Tetris buffer, wait for 1s */
5543 do {
5544 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5545 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5546 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5547 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5548 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5549 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5550 ((port_is_idle_0 & 0x1) == 0x1) &&
5551 ((port_is_idle_1 & 0x1) == 0x1) &&
5552 (pgl_exp_rom2 == 0xffffffff))
5553 break;
5554 msleep(1);
5555 } while (cnt-- > 0);
5556
5557 if (cnt <= 0) {
5558 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5559 " are still"
5560 " outstanding read requests after 1s!\n");
5561 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5562 " port_is_idle_0=0x%08x,"
5563 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5564 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5565 pgl_exp_rom2);
5566 return -EAGAIN;
5567 }
5568
5569 barrier();
5570
5571 /* Close gates #2, #3 and #4 */
5572 bnx2x_set_234_gates(bp, true);
5573
5574 /* TBD: Indicate that "process kill" is in progress to MCP */
5575
5576 /* Clear "unprepared" bit */
5577 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5578 barrier();
5579
5580 /* Make sure all is written to the chip before the reset */
5581 mmiowb();
5582
5583 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5584 * PSWHST, GRC and PSWRD Tetris buffer.
5585 */
5586 msleep(1);
5587
5588 /* Prepare to chip reset: */
5589 /* MCP */
5590 bnx2x_reset_mcp_prep(bp, &val);
5591
5592 /* PXP */
5593 bnx2x_pxp_prep(bp);
5594 barrier();
5595
5596 /* reset the chip */
5597 bnx2x_process_kill_chip_reset(bp);
5598 barrier();
5599
5600 /* Recover after reset: */
5601 /* MCP */
5602 if (bnx2x_reset_mcp_comp(bp, val))
5603 return -EAGAIN;
5604
5605 /* PXP */
5606 bnx2x_pxp_prep(bp);
5607
5608 /* Open the gates #2, #3 and #4 */
5609 bnx2x_set_234_gates(bp, false);
5610
5611 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5612 * reset state, re-enable attentions. */
5613
a2fbb9ea
ET
5614 return 0;
5615}
5616
72fd0718
VZ
5617static int bnx2x_leader_reset(struct bnx2x *bp)
5618{
5619 int rc = 0;
5620 /* Try to recover after the failure */
5621 if (bnx2x_process_kill(bp)) {
5622 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
5623 bp->dev->name);
5624 rc = -EAGAIN;
5625 goto exit_leader_reset;
5626 }
5627
5628 /* Clear "reset is in progress" bit and update the driver state */
5629 bnx2x_set_reset_done(bp);
5630 bp->recovery_state = BNX2X_RECOVERY_DONE;
5631
5632exit_leader_reset:
5633 bp->is_leader = 0;
5634 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5635 smp_wmb();
5636 return rc;
5637}
5638
72fd0718
VZ
5639/* Assumption: runs under rtnl lock. This together with the fact
5640 * that it's called only from bnx2x_reset_task() ensure that it
5641 * will never be called when netif_running(bp->dev) is false.
5642 */
5643static void bnx2x_parity_recover(struct bnx2x *bp)
5644{
5645 DP(NETIF_MSG_HW, "Handling parity\n");
5646 while (1) {
5647 switch (bp->recovery_state) {
5648 case BNX2X_RECOVERY_INIT:
5649 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5650 /* Try to get a LEADER_LOCK HW lock */
5651 if (bnx2x_trylock_hw_lock(bp,
5652 HW_LOCK_RESOURCE_RESERVED_08))
5653 bp->is_leader = 1;
5654
5655 /* Stop the driver */
5656 /* If interface has been removed - break */
5657 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5658 return;
5659
5660 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5661 /* Ensure "is_leader" and "recovery_state"
5662 * update values are seen on other CPUs
5663 */
5664 smp_wmb();
5665 break;
5666
5667 case BNX2X_RECOVERY_WAIT:
5668 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5669 if (bp->is_leader) {
5670 u32 load_counter = bnx2x_get_load_cnt(bp);
5671 if (load_counter) {
5672 /* Wait until all other functions get
5673 * down.
5674 */
5675 schedule_delayed_work(&bp->reset_task,
5676 HZ/10);
5677 return;
5678 } else {
5679 /* If all other functions got down -
5680 * try to bring the chip back to
5681 * normal. In any case it's an exit
5682 * point for a leader.
5683 */
5684 if (bnx2x_leader_reset(bp) ||
5685 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5686 printk(KERN_ERR"%s: Recovery "
5687 "has failed. Power cycle is "
5688 "needed.\n", bp->dev->name);
5689 /* Disconnect this device */
5690 netif_device_detach(bp->dev);
5691 /* Block ifup for all function
5692 * of this ASIC until
5693 * "process kill" or power
5694 * cycle.
5695 */
5696 bnx2x_set_reset_in_progress(bp);
5697 /* Shut down the power */
5698 bnx2x_set_power_state(bp,
5699 PCI_D3hot);
5700 return;
5701 }
5702
5703 return;
5704 }
5705 } else { /* non-leader */
5706 if (!bnx2x_reset_is_done(bp)) {
5707 /* Try to get a LEADER_LOCK HW lock as
5708 * long as a former leader may have
5709 * been unloaded by the user or
5710 * released a leadership by another
5711 * reason.
5712 */
5713 if (bnx2x_trylock_hw_lock(bp,
5714 HW_LOCK_RESOURCE_RESERVED_08)) {
5715 /* I'm a leader now! Restart a
5716 * switch case.
5717 */
5718 bp->is_leader = 1;
5719 break;
5720 }
5721
5722 schedule_delayed_work(&bp->reset_task,
5723 HZ/10);
5724 return;
5725
5726 } else { /* A leader has completed
5727 * the "process kill". It's an exit
5728 * point for a non-leader.
5729 */
5730 bnx2x_nic_load(bp, LOAD_NORMAL);
5731 bp->recovery_state =
5732 BNX2X_RECOVERY_DONE;
5733 smp_wmb();
5734 return;
5735 }
5736 }
5737 default:
5738 return;
5739 }
5740 }
5741}
5742
5743/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5744 * scheduled on a general queue in order to prevent a dead lock.
5745 */
34f80b04
EG
5746static void bnx2x_reset_task(struct work_struct *work)
5747{
72fd0718 5748 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
5749
5750#ifdef BNX2X_STOP_ON_ERROR
5751 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5752 " so reset not done to allow debug dump,\n"
72fd0718 5753 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
5754 return;
5755#endif
5756
5757 rtnl_lock();
5758
5759 if (!netif_running(bp->dev))
5760 goto reset_task_exit;
5761
72fd0718
VZ
5762 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5763 bnx2x_parity_recover(bp);
5764 else {
5765 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5766 bnx2x_nic_load(bp, LOAD_NORMAL);
5767 }
34f80b04
EG
5768
5769reset_task_exit:
5770 rtnl_unlock();
5771}
5772
a2fbb9ea
ET
5773/* end of nic load/unload */
5774
a2fbb9ea
ET
5775/*
5776 * Init service functions
5777 */
5778
f1ef27ef
EG
5779static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5780{
5781 switch (func) {
5782 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5783 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5784 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5785 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5786 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5787 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5788 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5789 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5790 default:
5791 BNX2X_ERR("Unsupported function index: %d\n", func);
5792 return (u32)(-1);
5793 }
5794}
5795
5796static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5797{
5798 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5799
5800 /* Flush all outstanding writes */
5801 mmiowb();
5802
5803 /* Pretend to be function 0 */
5804 REG_WR(bp, reg, 0);
5805 /* Flush the GRC transaction (in the chip) */
5806 new_val = REG_RD(bp, reg);
5807 if (new_val != 0) {
5808 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5809 new_val);
5810 BUG();
5811 }
5812
5813 /* From now we are in the "like-E1" mode */
5814 bnx2x_int_disable(bp);
5815
5816 /* Flush all outstanding writes */
5817 mmiowb();
5818
5819 /* Restore the original funtion settings */
5820 REG_WR(bp, reg, orig_func);
5821 new_val = REG_RD(bp, reg);
5822 if (new_val != orig_func) {
5823 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5824 orig_func, new_val);
5825 BUG();
5826 }
5827}
5828
5829static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5830{
5831 if (CHIP_IS_E1H(bp))
5832 bnx2x_undi_int_disable_e1h(bp, func);
5833 else
5834 bnx2x_int_disable(bp);
5835}
5836
34f80b04
EG
5837static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5838{
5839 u32 val;
5840
5841 /* Check if there is any driver already loaded */
5842 val = REG_RD(bp, MISC_REG_UNPREPARED);
5843 if (val == 0x1) {
5844 /* Check if it is the UNDI driver
5845 * UNDI driver initializes CID offset for normal bell to 0x7
5846 */
4a37fb66 5847 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5848 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5849 if (val == 0x7) {
5850 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5851 /* save our func */
34f80b04 5852 int func = BP_FUNC(bp);
da5a662a
VZ
5853 u32 swap_en;
5854 u32 swap_val;
34f80b04 5855
b4661739
EG
5856 /* clear the UNDI indication */
5857 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5858
34f80b04
EG
5859 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5860
5861 /* try unload UNDI on port 0 */
5862 bp->func = 0;
da5a662a
VZ
5863 bp->fw_seq =
5864 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5865 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 5866 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
5867
5868 /* if UNDI is loaded on the other port */
5869 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5870
da5a662a 5871 /* send "DONE" for previous unload */
a22f0788
YR
5872 bnx2x_fw_command(bp,
5873 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
5874
5875 /* unload UNDI on port 1 */
34f80b04 5876 bp->func = 1;
da5a662a
VZ
5877 bp->fw_seq =
5878 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5879 DRV_MSG_SEQ_NUMBER_MASK);
5880 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5881
a22f0788 5882 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
5883 }
5884
b4661739
EG
5885 /* now it's safe to release the lock */
5886 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5887
f1ef27ef 5888 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
5889
5890 /* close input traffic and wait for it */
5891 /* Do not rcv packets to BRB */
5892 REG_WR(bp,
5893 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5894 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5895 /* Do not direct rcv packets that are not for MCP to
5896 * the BRB */
5897 REG_WR(bp,
5898 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5899 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5900 /* clear AEU */
5901 REG_WR(bp,
5902 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5903 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5904 msleep(10);
5905
5906 /* save NIG port swap info */
5907 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5908 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
5909 /* reset device */
5910 REG_WR(bp,
5911 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 5912 0xd3ffffff);
34f80b04
EG
5913 REG_WR(bp,
5914 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5915 0x1403);
da5a662a
VZ
5916 /* take the NIG out of reset and restore swap values */
5917 REG_WR(bp,
5918 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5919 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5920 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5921 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5922
5923 /* send unload done to the MCP */
a22f0788 5924 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
5925
5926 /* restore our func and fw_seq */
5927 bp->func = func;
5928 bp->fw_seq =
5929 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5930 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
5931
5932 } else
5933 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5934 }
5935}
5936
5937static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5938{
5939 u32 val, val2, val3, val4, id;
72ce58c3 5940 u16 pmc;
34f80b04
EG
5941
5942 /* Get the chip revision id and number. */
5943 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5944 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5945 id = ((val & 0xffff) << 16);
5946 val = REG_RD(bp, MISC_REG_CHIP_REV);
5947 id |= ((val & 0xf) << 12);
5948 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5949 id |= ((val & 0xff) << 4);
5a40e08e 5950 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
5951 id |= (val & 0xf);
5952 bp->common.chip_id = id;
5953 bp->link_params.chip_id = bp->common.chip_id;
5954 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5955
1c06328c
EG
5956 val = (REG_RD(bp, 0x2874) & 0x55);
5957 if ((bp->common.chip_id & 0x1) ||
5958 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5959 bp->flags |= ONE_PORT_FLAG;
5960 BNX2X_DEV_INFO("single port device\n");
5961 }
5962
34f80b04
EG
5963 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5964 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5965 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5966 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5967 bp->common.flash_size, bp->common.flash_size);
5968
5969 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 5970 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 5971 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 5972 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
5973 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
5974 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
5975
5976 if (!bp->common.shmem_base ||
5977 (bp->common.shmem_base < 0xA0000) ||
5978 (bp->common.shmem_base >= 0xC0000)) {
5979 BNX2X_DEV_INFO("MCP not active\n");
5980 bp->flags |= NO_MCP_FLAG;
5981 return;
5982 }
5983
5984 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5985 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5986 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 5987 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
5988
5989 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 5990 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
5991
5992 bp->link_params.hw_led_mode = ((bp->common.hw_config &
5993 SHARED_HW_CFG_LED_MODE_MASK) >>
5994 SHARED_HW_CFG_LED_MODE_SHIFT);
5995
c2c8b03e
EG
5996 bp->link_params.feature_config_flags = 0;
5997 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
5998 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
5999 bp->link_params.feature_config_flags |=
6000 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6001 else
6002 bp->link_params.feature_config_flags &=
6003 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6004
34f80b04
EG
6005 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6006 bp->common.bc_ver = val;
6007 BNX2X_DEV_INFO("bc_ver %X\n", val);
6008 if (val < BNX2X_BC_VER) {
6009 /* for now only warn
6010 * later we might need to enforce this */
cdaa7cb8
VZ
6011 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6012 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 6013 }
4d295db0 6014 bp->link_params.feature_config_flags |=
a22f0788 6015 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
4d295db0 6016 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
a22f0788
YR
6017 bp->link_params.feature_config_flags |=
6018 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6019 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3
EG
6020
6021 if (BP_E1HVN(bp) == 0) {
6022 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6023 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6024 } else {
6025 /* no WOL capability for E1HVN != 0 */
6026 bp->flags |= NO_WOL_FLAG;
6027 }
6028 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 6029 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
6030
6031 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6032 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6033 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6034 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6035
cdaa7cb8
VZ
6036 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6037 val, val2, val3, val4);
34f80b04
EG
6038}
6039
6040static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6041 u32 switch_cfg)
a2fbb9ea 6042{
a22f0788
YR
6043 int cfg_size = 0, idx, port = BP_PORT(bp);
6044
6045 /* Aggregation of supported attributes of all external phys */
6046 bp->port.supported[0] = 0;
6047 bp->port.supported[1] = 0;
b7737c9b
YR
6048 switch (bp->link_params.num_phys) {
6049 case 1:
a22f0788
YR
6050 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6051 cfg_size = 1;
6052 break;
b7737c9b 6053 case 2:
a22f0788
YR
6054 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
6055 cfg_size = 1;
6056 break;
6057 case 3:
6058 if (bp->link_params.multi_phy_config &
6059 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6060 bp->port.supported[1] =
6061 bp->link_params.phy[EXT_PHY1].supported;
6062 bp->port.supported[0] =
6063 bp->link_params.phy[EXT_PHY2].supported;
6064 } else {
6065 bp->port.supported[0] =
6066 bp->link_params.phy[EXT_PHY1].supported;
6067 bp->port.supported[1] =
6068 bp->link_params.phy[EXT_PHY2].supported;
6069 }
6070 cfg_size = 2;
6071 break;
b7737c9b 6072 }
a2fbb9ea 6073
a22f0788 6074 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 6075 BNX2X_ERR("NVRAM config error. BAD phy config."
a22f0788 6076 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 6077 SHMEM_RD(bp,
a22f0788
YR
6078 dev_info.port_hw_config[port].external_phy_config),
6079 SHMEM_RD(bp,
6080 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea
ET
6081 return;
6082 }
6083
b7737c9b
YR
6084 switch (switch_cfg) {
6085 case SWITCH_CFG_1G:
34f80b04
EG
6086 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6087 port*0x10);
6088 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6089 break;
6090
6091 case SWITCH_CFG_10G:
34f80b04
EG
6092 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6093 port*0x18);
6094 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 6095
a2fbb9ea
ET
6096 break;
6097
6098 default:
6099 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 6100 bp->port.link_config[0]);
a2fbb9ea
ET
6101 return;
6102 }
a22f0788
YR
6103 /* mask what we support according to speed_cap_mask per configuration */
6104 for (idx = 0; idx < cfg_size; idx++) {
6105 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 6106 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 6107 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 6108
a22f0788 6109 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 6110 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 6111 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 6112
a22f0788 6113 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 6114 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 6115 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 6116
a22f0788 6117 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 6118 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 6119 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 6120
a22f0788 6121 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 6122 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 6123 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
34f80b04 6124 SUPPORTED_1000baseT_Full);
a2fbb9ea 6125
a22f0788 6126 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 6127 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 6128 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 6129
a22f0788 6130 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 6131 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
6132 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
6133
6134 }
a2fbb9ea 6135
a22f0788
YR
6136 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6137 bp->port.supported[1]);
a2fbb9ea
ET
6138}
6139
34f80b04 6140static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6141{
a22f0788
YR
6142 u32 link_config, idx, cfg_size = 0;
6143 bp->port.advertising[0] = 0;
6144 bp->port.advertising[1] = 0;
6145 switch (bp->link_params.num_phys) {
6146 case 1:
6147 case 2:
6148 cfg_size = 1;
6149 break;
6150 case 3:
6151 cfg_size = 2;
6152 break;
6153 }
6154 for (idx = 0; idx < cfg_size; idx++) {
6155 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6156 link_config = bp->port.link_config[idx];
6157 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6158 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
6159 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6160 bp->link_params.req_line_speed[idx] =
6161 SPEED_AUTO_NEG;
6162 bp->port.advertising[idx] |=
6163 bp->port.supported[idx];
a2fbb9ea 6164 } else {
a22f0788
YR
6165 /* force 10G, no AN */
6166 bp->link_params.req_line_speed[idx] =
6167 SPEED_10000;
6168 bp->port.advertising[idx] |=
6169 (ADVERTISED_10000baseT_Full |
a2fbb9ea 6170 ADVERTISED_FIBRE);
a22f0788 6171 continue;
a2fbb9ea
ET
6172 }
6173 break;
6174
6175 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
6176 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6177 bp->link_params.req_line_speed[idx] =
6178 SPEED_10;
6179 bp->port.advertising[idx] |=
6180 (ADVERTISED_10baseT_Full |
34f80b04 6181 ADVERTISED_TP);
a2fbb9ea 6182 } else {
cdaa7cb8
VZ
6183 BNX2X_ERROR("NVRAM config error. "
6184 "Invalid link_config 0x%x"
6185 " speed_cap_mask 0x%x\n",
a22f0788
YR
6186 link_config,
6187 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6188 return;
6189 }
6190 break;
6191
6192 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
6193 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6194 bp->link_params.req_line_speed[idx] =
6195 SPEED_10;
6196 bp->link_params.req_duplex[idx] =
6197 DUPLEX_HALF;
6198 bp->port.advertising[idx] |=
6199 (ADVERTISED_10baseT_Half |
34f80b04 6200 ADVERTISED_TP);
a2fbb9ea 6201 } else {
cdaa7cb8
VZ
6202 BNX2X_ERROR("NVRAM config error. "
6203 "Invalid link_config 0x%x"
6204 " speed_cap_mask 0x%x\n",
a22f0788
YR
6205 link_config,
6206 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6207 return;
6208 }
6209 break;
6210
6211 case PORT_FEATURE_LINK_SPEED_100M_FULL:
a22f0788
YR
6212 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
6213 bp->link_params.req_line_speed[idx] =
6214 SPEED_100;
6215 bp->port.advertising[idx] |=
6216 (ADVERTISED_100baseT_Full |
34f80b04 6217 ADVERTISED_TP);
a2fbb9ea 6218 } else {
cdaa7cb8
VZ
6219 BNX2X_ERROR("NVRAM config error. "
6220 "Invalid link_config 0x%x"
6221 " speed_cap_mask 0x%x\n",
a22f0788
YR
6222 link_config,
6223 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6224 return;
6225 }
6226 break;
6227
6228 case PORT_FEATURE_LINK_SPEED_100M_HALF:
a22f0788
YR
6229 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
6230 bp->link_params.req_line_speed[idx] = SPEED_100;
6231 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
6232 bp->port.advertising[idx] |=
6233 (ADVERTISED_100baseT_Half |
34f80b04 6234 ADVERTISED_TP);
a2fbb9ea 6235 } else {
cdaa7cb8
VZ
6236 BNX2X_ERROR("NVRAM config error. "
6237 "Invalid link_config 0x%x"
6238 " speed_cap_mask 0x%x\n",
a22f0788
YR
6239 link_config,
6240 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6241 return;
6242 }
6243 break;
6244
6245 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
6246 if (bp->port.supported[idx] &
6247 SUPPORTED_1000baseT_Full) {
6248 bp->link_params.req_line_speed[idx] =
6249 SPEED_1000;
6250 bp->port.advertising[idx] |=
6251 (ADVERTISED_1000baseT_Full |
34f80b04 6252 ADVERTISED_TP);
a2fbb9ea 6253 } else {
cdaa7cb8
VZ
6254 BNX2X_ERROR("NVRAM config error. "
6255 "Invalid link_config 0x%x"
6256 " speed_cap_mask 0x%x\n",
a22f0788
YR
6257 link_config,
6258 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6259 return;
6260 }
6261 break;
6262
6263 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
6264 if (bp->port.supported[idx] &
6265 SUPPORTED_2500baseX_Full) {
6266 bp->link_params.req_line_speed[idx] =
6267 SPEED_2500;
6268 bp->port.advertising[idx] |=
6269 (ADVERTISED_2500baseX_Full |
34f80b04 6270 ADVERTISED_TP);
a2fbb9ea 6271 } else {
cdaa7cb8
VZ
6272 BNX2X_ERROR("NVRAM config error. "
6273 "Invalid link_config 0x%x"
6274 " speed_cap_mask 0x%x\n",
a22f0788
YR
6275 link_config,
6276 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6277 return;
6278 }
6279 break;
6280
6281 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6282 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6283 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
6284 if (bp->port.supported[idx] &
6285 SUPPORTED_10000baseT_Full) {
6286 bp->link_params.req_line_speed[idx] =
6287 SPEED_10000;
6288 bp->port.advertising[idx] |=
6289 (ADVERTISED_10000baseT_Full |
34f80b04 6290 ADVERTISED_FIBRE);
a2fbb9ea 6291 } else {
cdaa7cb8
VZ
6292 BNX2X_ERROR("NVRAM config error. "
6293 "Invalid link_config 0x%x"
6294 " speed_cap_mask 0x%x\n",
a22f0788
YR
6295 link_config,
6296 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6297 return;
6298 }
6299 break;
6300
6301 default:
cdaa7cb8
VZ
6302 BNX2X_ERROR("NVRAM config error. "
6303 "BAD link speed link_config 0x%x\n",
a22f0788
YR
6304 link_config);
6305 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
6306 bp->port.advertising[idx] = bp->port.supported[idx];
a2fbb9ea
ET
6307 break;
6308 }
a2fbb9ea 6309
a22f0788 6310 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 6311 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
6312 if ((bp->link_params.req_flow_ctrl[idx] ==
6313 BNX2X_FLOW_CTRL_AUTO) &&
6314 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
6315 bp->link_params.req_flow_ctrl[idx] =
6316 BNX2X_FLOW_CTRL_NONE;
6317 }
a2fbb9ea 6318
a22f0788
YR
6319 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
6320 " 0x%x advertising 0x%x\n",
6321 bp->link_params.req_line_speed[idx],
6322 bp->link_params.req_duplex[idx],
6323 bp->link_params.req_flow_ctrl[idx],
6324 bp->port.advertising[idx]);
6325 }
a2fbb9ea
ET
6326}
6327
e665bfda
MC
6328static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6329{
6330 mac_hi = cpu_to_be16(mac_hi);
6331 mac_lo = cpu_to_be32(mac_lo);
6332 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6333 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6334}
6335
34f80b04 6336static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 6337{
34f80b04
EG
6338 int port = BP_PORT(bp);
6339 u32 val, val2;
589abe3a 6340 u32 config;
b7737c9b 6341 u32 ext_phy_type, ext_phy_config;;
a2fbb9ea 6342
c18487ee 6343 bp->link_params.bp = bp;
34f80b04 6344 bp->link_params.port = port;
c18487ee 6345
c18487ee 6346 bp->link_params.lane_config =
a2fbb9ea 6347 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 6348
a22f0788 6349 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
6350 SHMEM_RD(bp,
6351 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
6352 bp->link_params.speed_cap_mask[1] =
6353 SHMEM_RD(bp,
6354 dev_info.port_hw_config[port].speed_capability_mask2);
6355 bp->port.link_config[0] =
a2fbb9ea
ET
6356 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6357
a22f0788
YR
6358 bp->port.link_config[1] =
6359 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 6360
a22f0788
YR
6361 bp->link_params.multi_phy_config =
6362 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
6363 /* If the device is capable of WoL, set the default state according
6364 * to the HW
6365 */
4d295db0 6366 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
6367 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6368 (config & PORT_FEATURE_WOL_ENABLED));
6369
b7737c9b 6370 BNX2X_DEV_INFO("lane_config 0x%08x"
a22f0788 6371 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 6372 bp->link_params.lane_config,
a22f0788
YR
6373 bp->link_params.speed_cap_mask[0],
6374 bp->port.link_config[0]);
a2fbb9ea 6375
a22f0788 6376 bp->link_params.switch_cfg = (bp->port.link_config[0] &
4d295db0 6377 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 6378 bnx2x_phy_probe(&bp->link_params);
c18487ee 6379 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
6380
6381 bnx2x_link_settings_requested(bp);
6382
01cd4528
EG
6383 /*
6384 * If connected directly, work with the internal PHY, otherwise, work
6385 * with the external PHY
6386 */
b7737c9b
YR
6387 ext_phy_config =
6388 SHMEM_RD(bp,
6389 dev_info.port_hw_config[port].external_phy_config);
6390 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 6391 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 6392 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
6393
6394 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6395 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6396 bp->mdio.prtad =
b7737c9b 6397 XGXS_EXT_PHY_ADDR(ext_phy_config);
01cd4528 6398
a2fbb9ea
ET
6399 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6400 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 6401 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
6402 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6403 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
6404
6405#ifdef BCM_CNIC
6406 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6407 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6408 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6409#endif
34f80b04
EG
6410}
6411
6412static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6413{
6414 int func = BP_FUNC(bp);
6415 u32 val, val2;
6416 int rc = 0;
a2fbb9ea 6417
34f80b04 6418 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 6419
34f80b04
EG
6420 bp->e1hov = 0;
6421 bp->e1hmf = 0;
2145a920 6422 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
6423 bp->mf_config =
6424 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 6425
2691d51d 6426 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 6427 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 6428 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 6429 bp->e1hmf = 1;
2691d51d
EG
6430 BNX2X_DEV_INFO("%s function mode\n",
6431 IS_E1HMF(bp) ? "multi" : "single");
6432
6433 if (IS_E1HMF(bp)) {
6434 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6435 e1hov_tag) &
6436 FUNC_MF_CFG_E1HOV_TAG_MASK);
6437 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6438 bp->e1hov = val;
6439 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6440 "(0x%04x)\n",
6441 func, bp->e1hov, bp->e1hov);
6442 } else {
cdaa7cb8
VZ
6443 BNX2X_ERROR("No valid E1HOV for func %d,"
6444 " aborting\n", func);
34f80b04
EG
6445 rc = -EPERM;
6446 }
2691d51d
EG
6447 } else {
6448 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
6449 BNX2X_ERROR("VN %d in single function mode,"
6450 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
6451 rc = -EPERM;
6452 }
34f80b04
EG
6453 }
6454 }
a2fbb9ea 6455
34f80b04
EG
6456 if (!BP_NOMCP(bp)) {
6457 bnx2x_get_port_hwinfo(bp);
6458
6459 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6460 DRV_MSG_SEQ_NUMBER_MASK);
6461 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6462 }
6463
6464 if (IS_E1HMF(bp)) {
6465 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6466 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6467 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6468 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6469 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6470 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6471 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6472 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6473 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6474 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6475 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6476 ETH_ALEN);
6477 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6478 ETH_ALEN);
a2fbb9ea 6479 }
34f80b04
EG
6480
6481 return rc;
a2fbb9ea
ET
6482 }
6483
34f80b04
EG
6484 if (BP_NOMCP(bp)) {
6485 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 6486 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
6487 random_ether_addr(bp->dev->dev_addr);
6488 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6489 }
a2fbb9ea 6490
34f80b04
EG
6491 return rc;
6492}
6493
34f24c7f
VZ
6494static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6495{
6496 int cnt, i, block_end, rodi;
6497 char vpd_data[BNX2X_VPD_LEN+1];
6498 char str_id_reg[VENDOR_ID_LEN+1];
6499 char str_id_cap[VENDOR_ID_LEN+1];
6500 u8 len;
6501
6502 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6503 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6504
6505 if (cnt < BNX2X_VPD_LEN)
6506 goto out_not_found;
6507
6508 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6509 PCI_VPD_LRDT_RO_DATA);
6510 if (i < 0)
6511 goto out_not_found;
6512
6513
6514 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6515 pci_vpd_lrdt_size(&vpd_data[i]);
6516
6517 i += PCI_VPD_LRDT_TAG_SIZE;
6518
6519 if (block_end > BNX2X_VPD_LEN)
6520 goto out_not_found;
6521
6522 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6523 PCI_VPD_RO_KEYWORD_MFR_ID);
6524 if (rodi < 0)
6525 goto out_not_found;
6526
6527 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6528
6529 if (len != VENDOR_ID_LEN)
6530 goto out_not_found;
6531
6532 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6533
6534 /* vendor specific info */
6535 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6536 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6537 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6538 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6539
6540 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6541 PCI_VPD_RO_KEYWORD_VENDOR0);
6542 if (rodi >= 0) {
6543 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6544
6545 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6546
6547 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6548 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6549 bp->fw_ver[len] = ' ';
6550 }
6551 }
6552 return;
6553 }
6554out_not_found:
6555 return;
6556}
6557
34f80b04
EG
6558static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6559{
6560 int func = BP_FUNC(bp);
87942b46 6561 int timer_interval;
34f80b04
EG
6562 int rc;
6563
da5a662a
VZ
6564 /* Disable interrupt handling until HW is initialized */
6565 atomic_set(&bp->intr_sem, 1);
e1510706 6566 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 6567
34f80b04 6568 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 6569 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 6570 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
6571#ifdef BCM_CNIC
6572 mutex_init(&bp->cnic_mutex);
6573#endif
a2fbb9ea 6574
1cf167f2 6575 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 6576 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
6577
6578 rc = bnx2x_get_hwinfo(bp);
6579
34f24c7f 6580 bnx2x_read_fwinfo(bp);
34f80b04
EG
6581 /* need to reset chip if undi was active */
6582 if (!BP_NOMCP(bp))
6583 bnx2x_undi_unload(bp);
6584
6585 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 6586 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
6587
6588 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
6589 dev_err(&bp->pdev->dev, "MCP disabled, "
6590 "must load devices in order!\n");
34f80b04 6591
555f6c78 6592 /* Set multi queue mode */
8badd27a
EG
6593 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6594 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
6595 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6596 "requested is not MSI-X\n");
555f6c78
EG
6597 multi_mode = ETH_RSS_MODE_DISABLED;
6598 }
6599 bp->multi_mode = multi_mode;
5d7cd496 6600 bp->int_mode = int_mode;
555f6c78 6601
4fd89b7a
DK
6602 bp->dev->features |= NETIF_F_GRO;
6603
7a9b2557
VZ
6604 /* Set TPA flags */
6605 if (disable_tpa) {
6606 bp->flags &= ~TPA_ENABLE_FLAG;
6607 bp->dev->features &= ~NETIF_F_LRO;
6608 } else {
6609 bp->flags |= TPA_ENABLE_FLAG;
6610 bp->dev->features |= NETIF_F_LRO;
6611 }
5d7cd496 6612 bp->disable_tpa = disable_tpa;
7a9b2557 6613
a18f5128
EG
6614 if (CHIP_IS_E1(bp))
6615 bp->dropless_fc = 0;
6616 else
6617 bp->dropless_fc = dropless_fc;
6618
8d5726c4 6619 bp->mrrs = mrrs;
7a9b2557 6620
34f80b04 6621 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04
EG
6622
6623 bp->rx_csum = 1;
34f80b04 6624
7d323bfd
EG
6625 /* make sure that the numbers are in the right granularity */
6626 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6627 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 6628
87942b46
EG
6629 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6630 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
6631
6632 init_timer(&bp->timer);
6633 bp->timer.expires = jiffies + bp->current_interval;
6634 bp->timer.data = (unsigned long) bp;
6635 bp->timer.function = bnx2x_timer;
6636
6637 return rc;
a2fbb9ea
ET
6638}
6639
a2fbb9ea 6640
de0c62db
DK
6641/****************************************************************************
6642* General service functions
6643****************************************************************************/
a2fbb9ea 6644
bb2a0f7a 6645/* called with rtnl_lock */
a2fbb9ea
ET
6646static int bnx2x_open(struct net_device *dev)
6647{
6648 struct bnx2x *bp = netdev_priv(dev);
6649
6eccabb3
EG
6650 netif_carrier_off(dev);
6651
a2fbb9ea
ET
6652 bnx2x_set_power_state(bp, PCI_D0);
6653
72fd0718
VZ
6654 if (!bnx2x_reset_is_done(bp)) {
6655 do {
6656 /* Reset MCP mail box sequence if there is on going
6657 * recovery
6658 */
6659 bp->fw_seq = 0;
6660
6661 /* If it's the first function to load and reset done
6662 * is still not cleared it may mean that. We don't
6663 * check the attention state here because it may have
6664 * already been cleared by a "common" reset but we
6665 * shell proceed with "process kill" anyway.
6666 */
6667 if ((bnx2x_get_load_cnt(bp) == 0) &&
6668 bnx2x_trylock_hw_lock(bp,
6669 HW_LOCK_RESOURCE_RESERVED_08) &&
6670 (!bnx2x_leader_reset(bp))) {
6671 DP(NETIF_MSG_HW, "Recovered in open\n");
6672 break;
6673 }
6674
6675 bnx2x_set_power_state(bp, PCI_D3hot);
6676
6677 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6678 " completed yet. Try again later. If u still see this"
6679 " message after a few retries then power cycle is"
6680 " required.\n", bp->dev->name);
6681
6682 return -EAGAIN;
6683 } while (0);
6684 }
6685
6686 bp->recovery_state = BNX2X_RECOVERY_DONE;
6687
bb2a0f7a 6688 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
6689}
6690
bb2a0f7a 6691/* called with rtnl_lock */
a2fbb9ea
ET
6692static int bnx2x_close(struct net_device *dev)
6693{
a2fbb9ea
ET
6694 struct bnx2x *bp = netdev_priv(dev);
6695
6696 /* Unload the driver, release IRQs */
bb2a0f7a 6697 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 6698 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
6699
6700 return 0;
6701}
6702
f5372251 6703/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 6704void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
6705{
6706 struct bnx2x *bp = netdev_priv(dev);
6707 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6708 int port = BP_PORT(bp);
6709
6710 if (bp->state != BNX2X_STATE_OPEN) {
6711 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6712 return;
6713 }
6714
6715 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6716
6717 if (dev->flags & IFF_PROMISC)
6718 rx_mode = BNX2X_RX_MODE_PROMISC;
6719
6720 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
6721 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6722 CHIP_IS_E1(bp)))
34f80b04
EG
6723 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6724
6725 else { /* some multicasts */
6726 if (CHIP_IS_E1(bp)) {
6727 int i, old, offset;
22bedad3 6728 struct netdev_hw_addr *ha;
34f80b04
EG
6729 struct mac_configuration_cmd *config =
6730 bnx2x_sp(bp, mcast_config);
6731
0ddf477b 6732 i = 0;
22bedad3 6733 netdev_for_each_mc_addr(ha, dev) {
34f80b04
EG
6734 config->config_table[i].
6735 cam_entry.msb_mac_addr =
22bedad3 6736 swab16(*(u16 *)&ha->addr[0]);
34f80b04
EG
6737 config->config_table[i].
6738 cam_entry.middle_mac_addr =
22bedad3 6739 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
6740 config->config_table[i].
6741 cam_entry.lsb_mac_addr =
22bedad3 6742 swab16(*(u16 *)&ha->addr[4]);
34f80b04
EG
6743 config->config_table[i].cam_entry.flags =
6744 cpu_to_le16(port);
6745 config->config_table[i].
6746 target_table_entry.flags = 0;
ca00392c
EG
6747 config->config_table[i].target_table_entry.
6748 clients_bit_vector =
6749 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
6750 config->config_table[i].
6751 target_table_entry.vlan_id = 0;
6752
6753 DP(NETIF_MSG_IFUP,
6754 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6755 config->config_table[i].
6756 cam_entry.msb_mac_addr,
6757 config->config_table[i].
6758 cam_entry.middle_mac_addr,
6759 config->config_table[i].
6760 cam_entry.lsb_mac_addr);
0ddf477b 6761 i++;
34f80b04 6762 }
8d9c5f34 6763 old = config->hdr.length;
34f80b04
EG
6764 if (old > i) {
6765 for (; i < old; i++) {
6766 if (CAM_IS_INVALID(config->
6767 config_table[i])) {
af246401 6768 /* already invalidated */
34f80b04
EG
6769 break;
6770 }
6771 /* invalidate */
6772 CAM_INVALIDATE(config->
6773 config_table[i]);
6774 }
6775 }
6776
6777 if (CHIP_REV_IS_SLOW(bp))
6778 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6779 else
6780 offset = BNX2X_MAX_MULTICAST*(1 + port);
6781
8d9c5f34 6782 config->hdr.length = i;
34f80b04 6783 config->hdr.offset = offset;
8d9c5f34 6784 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6785 config->hdr.reserved1 = 0;
6786
e665bfda
MC
6787 bp->set_mac_pending++;
6788 smp_wmb();
6789
34f80b04
EG
6790 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6791 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6792 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6793 0);
6794 } else { /* E1H */
6795 /* Accept one or more multicasts */
22bedad3 6796 struct netdev_hw_addr *ha;
34f80b04
EG
6797 u32 mc_filter[MC_HASH_SIZE];
6798 u32 crc, bit, regidx;
6799 int i;
6800
6801 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6802
22bedad3 6803 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 6804 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 6805 ha->addr);
34f80b04 6806
22bedad3 6807 crc = crc32c_le(0, ha->addr, ETH_ALEN);
34f80b04
EG
6808 bit = (crc >> 24) & 0xff;
6809 regidx = bit >> 5;
6810 bit &= 0x1f;
6811 mc_filter[regidx] |= (1 << bit);
6812 }
6813
6814 for (i = 0; i < MC_HASH_SIZE; i++)
6815 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6816 mc_filter[i]);
6817 }
6818 }
6819
6820 bp->rx_mode = rx_mode;
6821 bnx2x_set_storm_rx_mode(bp);
6822}
6823
a2fbb9ea 6824
c18487ee 6825/* called with rtnl_lock */
01cd4528
EG
6826static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6827 int devad, u16 addr)
a2fbb9ea 6828{
01cd4528
EG
6829 struct bnx2x *bp = netdev_priv(netdev);
6830 u16 value;
6831 int rc;
a2fbb9ea 6832
01cd4528
EG
6833 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6834 prtad, devad, addr);
a2fbb9ea 6835
01cd4528
EG
6836 /* The HW expects different devad if CL22 is used */
6837 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 6838
01cd4528 6839 bnx2x_acquire_phy_lock(bp);
e10bc84d 6840 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
6841 bnx2x_release_phy_lock(bp);
6842 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 6843
01cd4528
EG
6844 if (!rc)
6845 rc = value;
6846 return rc;
6847}
a2fbb9ea 6848
01cd4528
EG
6849/* called with rtnl_lock */
6850static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6851 u16 addr, u16 value)
6852{
6853 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
6854 int rc;
6855
6856 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6857 " value 0x%x\n", prtad, devad, addr, value);
6858
01cd4528
EG
6859 /* The HW expects different devad if CL22 is used */
6860 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 6861
01cd4528 6862 bnx2x_acquire_phy_lock(bp);
e10bc84d 6863 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
6864 bnx2x_release_phy_lock(bp);
6865 return rc;
6866}
c18487ee 6867
01cd4528
EG
6868/* called with rtnl_lock */
6869static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6870{
6871 struct bnx2x *bp = netdev_priv(dev);
6872 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 6873
01cd4528
EG
6874 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
6875 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 6876
01cd4528
EG
6877 if (!netif_running(dev))
6878 return -EAGAIN;
6879
6880 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
6881}
6882
257ddbda 6883#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
6884static void poll_bnx2x(struct net_device *dev)
6885{
6886 struct bnx2x *bp = netdev_priv(dev);
6887
6888 disable_irq(bp->pdev->irq);
6889 bnx2x_interrupt(bp->pdev->irq, dev);
6890 enable_irq(bp->pdev->irq);
6891}
6892#endif
6893
c64213cd
SH
6894static const struct net_device_ops bnx2x_netdev_ops = {
6895 .ndo_open = bnx2x_open,
6896 .ndo_stop = bnx2x_close,
6897 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 6898 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
6899 .ndo_set_mac_address = bnx2x_change_mac_addr,
6900 .ndo_validate_addr = eth_validate_addr,
6901 .ndo_do_ioctl = bnx2x_ioctl,
6902 .ndo_change_mtu = bnx2x_change_mtu,
6903 .ndo_tx_timeout = bnx2x_tx_timeout,
6904#ifdef BCM_VLAN
6905 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
6906#endif
257ddbda 6907#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
6908 .ndo_poll_controller = poll_bnx2x,
6909#endif
6910};
6911
34f80b04
EG
6912static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
6913 struct net_device *dev)
a2fbb9ea
ET
6914{
6915 struct bnx2x *bp;
6916 int rc;
6917
6918 SET_NETDEV_DEV(dev, &pdev->dev);
6919 bp = netdev_priv(dev);
6920
34f80b04
EG
6921 bp->dev = dev;
6922 bp->pdev = pdev;
a2fbb9ea 6923 bp->flags = 0;
34f80b04 6924 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
6925
6926 rc = pci_enable_device(pdev);
6927 if (rc) {
cdaa7cb8
VZ
6928 dev_err(&bp->pdev->dev,
6929 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
6930 goto err_out;
6931 }
6932
6933 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
6934 dev_err(&bp->pdev->dev,
6935 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
6936 rc = -ENODEV;
6937 goto err_out_disable;
6938 }
6939
6940 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
6941 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
6942 " base address, aborting\n");
a2fbb9ea
ET
6943 rc = -ENODEV;
6944 goto err_out_disable;
6945 }
6946
34f80b04
EG
6947 if (atomic_read(&pdev->enable_cnt) == 1) {
6948 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6949 if (rc) {
cdaa7cb8
VZ
6950 dev_err(&bp->pdev->dev,
6951 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
6952 goto err_out_disable;
6953 }
a2fbb9ea 6954
34f80b04
EG
6955 pci_set_master(pdev);
6956 pci_save_state(pdev);
6957 }
a2fbb9ea
ET
6958
6959 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6960 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
6961 dev_err(&bp->pdev->dev,
6962 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
6963 rc = -EIO;
6964 goto err_out_release;
6965 }
6966
6967 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
6968 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
6969 dev_err(&bp->pdev->dev,
6970 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
6971 rc = -EIO;
6972 goto err_out_release;
6973 }
6974
1a983142 6975 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 6976 bp->flags |= USING_DAC_FLAG;
1a983142 6977 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
6978 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
6979 " failed, aborting\n");
a2fbb9ea
ET
6980 rc = -EIO;
6981 goto err_out_release;
6982 }
6983
1a983142 6984 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
6985 dev_err(&bp->pdev->dev,
6986 "System does not support DMA, aborting\n");
a2fbb9ea
ET
6987 rc = -EIO;
6988 goto err_out_release;
6989 }
6990
34f80b04
EG
6991 dev->mem_start = pci_resource_start(pdev, 0);
6992 dev->base_addr = dev->mem_start;
6993 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
6994
6995 dev->irq = pdev->irq;
6996
275f165f 6997 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 6998 if (!bp->regview) {
cdaa7cb8
VZ
6999 dev_err(&bp->pdev->dev,
7000 "Cannot map register space, aborting\n");
a2fbb9ea
ET
7001 rc = -ENOMEM;
7002 goto err_out_release;
7003 }
7004
34f80b04
EG
7005 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7006 min_t(u64, BNX2X_DB_SIZE,
7007 pci_resource_len(pdev, 2)));
a2fbb9ea 7008 if (!bp->doorbells) {
cdaa7cb8
VZ
7009 dev_err(&bp->pdev->dev,
7010 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
7011 rc = -ENOMEM;
7012 goto err_out_unmap;
7013 }
7014
7015 bnx2x_set_power_state(bp, PCI_D0);
7016
34f80b04
EG
7017 /* clean indirect addresses */
7018 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7019 PCICFG_VENDOR_ID_OFFSET);
7020 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7021 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7022 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7023 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 7024
72fd0718
VZ
7025 /* Reset the load counter */
7026 bnx2x_clear_load_cnt(bp);
7027
34f80b04 7028 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 7029
c64213cd 7030 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 7031 bnx2x_set_ethtool_ops(dev);
34f80b04
EG
7032 dev->features |= NETIF_F_SG;
7033 dev->features |= NETIF_F_HW_CSUM;
7034 if (bp->flags & USING_DAC_FLAG)
7035 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
7036 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7037 dev->features |= NETIF_F_TSO6;
34f80b04
EG
7038#ifdef BCM_VLAN
7039 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 7040 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
7041
7042 dev->vlan_features |= NETIF_F_SG;
7043 dev->vlan_features |= NETIF_F_HW_CSUM;
7044 if (bp->flags & USING_DAC_FLAG)
7045 dev->vlan_features |= NETIF_F_HIGHDMA;
7046 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7047 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 7048#endif
a2fbb9ea 7049
01cd4528
EG
7050 /* get_port_hwinfo() will set prtad and mmds properly */
7051 bp->mdio.prtad = MDIO_PRTAD_NONE;
7052 bp->mdio.mmds = 0;
7053 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7054 bp->mdio.dev = dev;
7055 bp->mdio.mdio_read = bnx2x_mdio_read;
7056 bp->mdio.mdio_write = bnx2x_mdio_write;
7057
a2fbb9ea
ET
7058 return 0;
7059
7060err_out_unmap:
7061 if (bp->regview) {
7062 iounmap(bp->regview);
7063 bp->regview = NULL;
7064 }
a2fbb9ea
ET
7065 if (bp->doorbells) {
7066 iounmap(bp->doorbells);
7067 bp->doorbells = NULL;
7068 }
7069
7070err_out_release:
34f80b04
EG
7071 if (atomic_read(&pdev->enable_cnt) == 1)
7072 pci_release_regions(pdev);
a2fbb9ea
ET
7073
7074err_out_disable:
7075 pci_disable_device(pdev);
7076 pci_set_drvdata(pdev, NULL);
7077
7078err_out:
7079 return rc;
7080}
7081
37f9ce62
EG
7082static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7083 int *width, int *speed)
25047950
ET
7084{
7085 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7086
37f9ce62 7087 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 7088
37f9ce62
EG
7089 /* return value of 1=2.5GHz 2=5GHz */
7090 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 7091}
37f9ce62 7092
6891dd25 7093static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 7094{
37f9ce62 7095 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
7096 struct bnx2x_fw_file_hdr *fw_hdr;
7097 struct bnx2x_fw_file_section *sections;
94a78b79 7098 u32 offset, len, num_ops;
37f9ce62 7099 u16 *ops_offsets;
94a78b79 7100 int i;
37f9ce62 7101 const u8 *fw_ver;
94a78b79
VZ
7102
7103 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7104 return -EINVAL;
7105
7106 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7107 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7108
7109 /* Make sure none of the offsets and sizes make us read beyond
7110 * the end of the firmware data */
7111 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7112 offset = be32_to_cpu(sections[i].offset);
7113 len = be32_to_cpu(sections[i].len);
7114 if (offset + len > firmware->size) {
cdaa7cb8
VZ
7115 dev_err(&bp->pdev->dev,
7116 "Section %d length is out of bounds\n", i);
94a78b79
VZ
7117 return -EINVAL;
7118 }
7119 }
7120
7121 /* Likewise for the init_ops offsets */
7122 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7123 ops_offsets = (u16 *)(firmware->data + offset);
7124 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7125
7126 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7127 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
7128 dev_err(&bp->pdev->dev,
7129 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
7130 return -EINVAL;
7131 }
7132 }
7133
7134 /* Check FW version */
7135 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7136 fw_ver = firmware->data + offset;
7137 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7138 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7139 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7140 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
7141 dev_err(&bp->pdev->dev,
7142 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
7143 fw_ver[0], fw_ver[1], fw_ver[2],
7144 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7145 BCM_5710_FW_MINOR_VERSION,
7146 BCM_5710_FW_REVISION_VERSION,
7147 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 7148 return -EINVAL;
94a78b79
VZ
7149 }
7150
7151 return 0;
7152}
7153
ab6ad5a4 7154static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7155{
ab6ad5a4
EG
7156 const __be32 *source = (const __be32 *)_source;
7157 u32 *target = (u32 *)_target;
94a78b79 7158 u32 i;
94a78b79
VZ
7159
7160 for (i = 0; i < n/4; i++)
7161 target[i] = be32_to_cpu(source[i]);
7162}
7163
7164/*
7165 Ops array is stored in the following format:
7166 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7167 */
ab6ad5a4 7168static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 7169{
ab6ad5a4
EG
7170 const __be32 *source = (const __be32 *)_source;
7171 struct raw_op *target = (struct raw_op *)_target;
94a78b79 7172 u32 i, j, tmp;
94a78b79 7173
ab6ad5a4 7174 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
7175 tmp = be32_to_cpu(source[j]);
7176 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
7177 target[i].offset = tmp & 0xffffff;
7178 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
7179 }
7180}
ab6ad5a4
EG
7181
7182static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7183{
ab6ad5a4
EG
7184 const __be16 *source = (const __be16 *)_source;
7185 u16 *target = (u16 *)_target;
94a78b79 7186 u32 i;
94a78b79
VZ
7187
7188 for (i = 0; i < n/2; i++)
7189 target[i] = be16_to_cpu(source[i]);
7190}
7191
7995c64e
JP
7192#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7193do { \
7194 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7195 bp->arr = kmalloc(len, GFP_KERNEL); \
7196 if (!bp->arr) { \
7197 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7198 goto lbl; \
7199 } \
7200 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7201 (u8 *)bp->arr, len); \
7202} while (0)
94a78b79 7203
6891dd25 7204int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 7205{
45229b42 7206 const char *fw_file_name;
94a78b79 7207 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 7208 int rc;
94a78b79 7209
94a78b79 7210 if (CHIP_IS_E1(bp))
45229b42 7211 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 7212 else if (CHIP_IS_E1H(bp))
45229b42 7213 fw_file_name = FW_FILE_NAME_E1H;
cdaa7cb8 7214 else {
6891dd25 7215 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
7216 return -EINVAL;
7217 }
94a78b79 7218
6891dd25 7219 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 7220
6891dd25 7221 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 7222 if (rc) {
6891dd25 7223 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
7224 goto request_firmware_exit;
7225 }
7226
7227 rc = bnx2x_check_firmware(bp);
7228 if (rc) {
6891dd25 7229 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
7230 goto request_firmware_exit;
7231 }
7232
7233 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7234
7235 /* Initialize the pointers to the init arrays */
7236 /* Blob */
7237 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7238
7239 /* Opcodes */
7240 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7241
7242 /* Offsets */
ab6ad5a4
EG
7243 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7244 be16_to_cpu_n);
94a78b79
VZ
7245
7246 /* STORMs firmware */
573f2035
EG
7247 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7248 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7249 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7250 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7251 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7252 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7253 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7254 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7255 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7256 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7257 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7258 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7259 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7260 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7261 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7262 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
7263
7264 return 0;
ab6ad5a4 7265
94a78b79
VZ
7266init_offsets_alloc_err:
7267 kfree(bp->init_ops);
7268init_ops_alloc_err:
7269 kfree(bp->init_data);
7270request_firmware_exit:
7271 release_firmware(bp->firmware);
7272
7273 return rc;
7274}
7275
7276
a2fbb9ea
ET
7277static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7278 const struct pci_device_id *ent)
7279{
a2fbb9ea
ET
7280 struct net_device *dev = NULL;
7281 struct bnx2x *bp;
37f9ce62 7282 int pcie_width, pcie_speed;
25047950 7283 int rc;
a2fbb9ea 7284
a2fbb9ea 7285 /* dev zeroed in init_etherdev */
555f6c78 7286 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04 7287 if (!dev) {
cdaa7cb8 7288 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 7289 return -ENOMEM;
34f80b04 7290 }
a2fbb9ea 7291
a2fbb9ea 7292 bp = netdev_priv(dev);
7995c64e 7293 bp->msg_enable = debug;
a2fbb9ea 7294
df4770de
EG
7295 pci_set_drvdata(pdev, dev);
7296
34f80b04 7297 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
7298 if (rc < 0) {
7299 free_netdev(dev);
7300 return rc;
7301 }
7302
34f80b04 7303 rc = bnx2x_init_bp(bp);
693fc0d1
EG
7304 if (rc)
7305 goto init_one_exit;
7306
7307 rc = register_netdev(dev);
34f80b04 7308 if (rc) {
693fc0d1 7309 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
7310 goto init_one_exit;
7311 }
7312
37f9ce62 7313 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
cdaa7cb8
VZ
7314 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7315 " IRQ %d, ", board_info[ent->driver_data].name,
7316 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7317 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7318 dev->base_addr, bp->pdev->irq);
7319 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 7320
a2fbb9ea 7321 return 0;
34f80b04
EG
7322
7323init_one_exit:
7324 if (bp->regview)
7325 iounmap(bp->regview);
7326
7327 if (bp->doorbells)
7328 iounmap(bp->doorbells);
7329
7330 free_netdev(dev);
7331
7332 if (atomic_read(&pdev->enable_cnt) == 1)
7333 pci_release_regions(pdev);
7334
7335 pci_disable_device(pdev);
7336 pci_set_drvdata(pdev, NULL);
7337
7338 return rc;
a2fbb9ea
ET
7339}
7340
7341static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7342{
7343 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
7344 struct bnx2x *bp;
7345
7346 if (!dev) {
cdaa7cb8 7347 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
7348 return;
7349 }
228241eb 7350 bp = netdev_priv(dev);
a2fbb9ea 7351
a2fbb9ea
ET
7352 unregister_netdev(dev);
7353
72fd0718
VZ
7354 /* Make sure RESET task is not scheduled before continuing */
7355 cancel_delayed_work_sync(&bp->reset_task);
7356
a2fbb9ea
ET
7357 if (bp->regview)
7358 iounmap(bp->regview);
7359
7360 if (bp->doorbells)
7361 iounmap(bp->doorbells);
7362
7363 free_netdev(dev);
34f80b04
EG
7364
7365 if (atomic_read(&pdev->enable_cnt) == 1)
7366 pci_release_regions(pdev);
7367
a2fbb9ea
ET
7368 pci_disable_device(pdev);
7369 pci_set_drvdata(pdev, NULL);
7370}
7371
f8ef6e44
YG
7372static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7373{
7374 int i;
7375
7376 bp->state = BNX2X_STATE_ERROR;
7377
7378 bp->rx_mode = BNX2X_RX_MODE_NONE;
7379
7380 bnx2x_netif_stop(bp, 0);
c89af1a3 7381 netif_carrier_off(bp->dev);
f8ef6e44
YG
7382
7383 del_timer_sync(&bp->timer);
7384 bp->stats_state = STATS_STATE_DISABLED;
7385 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7386
7387 /* Release IRQs */
6cbe5065 7388 bnx2x_free_irq(bp, false);
f8ef6e44
YG
7389
7390 if (CHIP_IS_E1(bp)) {
7391 struct mac_configuration_cmd *config =
7392 bnx2x_sp(bp, mcast_config);
7393
8d9c5f34 7394 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
7395 CAM_INVALIDATE(config->config_table[i]);
7396 }
7397
7398 /* Free SKBs, SGEs, TPA pool and driver internals */
7399 bnx2x_free_skbs(bp);
54b9ddaa 7400 for_each_queue(bp, i)
f8ef6e44 7401 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 7402 for_each_queue(bp, i)
7cde1c8b 7403 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
7404 bnx2x_free_mem(bp);
7405
7406 bp->state = BNX2X_STATE_CLOSED;
7407
f8ef6e44
YG
7408 return 0;
7409}
7410
7411static void bnx2x_eeh_recover(struct bnx2x *bp)
7412{
7413 u32 val;
7414
7415 mutex_init(&bp->port.phy_mutex);
7416
7417 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7418 bp->link_params.shmem_base = bp->common.shmem_base;
7419 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7420
7421 if (!bp->common.shmem_base ||
7422 (bp->common.shmem_base < 0xA0000) ||
7423 (bp->common.shmem_base >= 0xC0000)) {
7424 BNX2X_DEV_INFO("MCP not active\n");
7425 bp->flags |= NO_MCP_FLAG;
7426 return;
7427 }
7428
7429 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7430 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7431 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7432 BNX2X_ERR("BAD MCP validity signature\n");
7433
7434 if (!BP_NOMCP(bp)) {
7435 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7436 & DRV_MSG_SEQ_NUMBER_MASK);
7437 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7438 }
7439}
7440
493adb1f
WX
7441/**
7442 * bnx2x_io_error_detected - called when PCI error is detected
7443 * @pdev: Pointer to PCI device
7444 * @state: The current pci connection state
7445 *
7446 * This function is called after a PCI bus error affecting
7447 * this device has been detected.
7448 */
7449static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7450 pci_channel_state_t state)
7451{
7452 struct net_device *dev = pci_get_drvdata(pdev);
7453 struct bnx2x *bp = netdev_priv(dev);
7454
7455 rtnl_lock();
7456
7457 netif_device_detach(dev);
7458
07ce50e4
DN
7459 if (state == pci_channel_io_perm_failure) {
7460 rtnl_unlock();
7461 return PCI_ERS_RESULT_DISCONNECT;
7462 }
7463
493adb1f 7464 if (netif_running(dev))
f8ef6e44 7465 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
7466
7467 pci_disable_device(pdev);
7468
7469 rtnl_unlock();
7470
7471 /* Request a slot reset */
7472 return PCI_ERS_RESULT_NEED_RESET;
7473}
7474
7475/**
7476 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7477 * @pdev: Pointer to PCI device
7478 *
7479 * Restart the card from scratch, as if from a cold-boot.
7480 */
7481static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7482{
7483 struct net_device *dev = pci_get_drvdata(pdev);
7484 struct bnx2x *bp = netdev_priv(dev);
7485
7486 rtnl_lock();
7487
7488 if (pci_enable_device(pdev)) {
7489 dev_err(&pdev->dev,
7490 "Cannot re-enable PCI device after reset\n");
7491 rtnl_unlock();
7492 return PCI_ERS_RESULT_DISCONNECT;
7493 }
7494
7495 pci_set_master(pdev);
7496 pci_restore_state(pdev);
7497
7498 if (netif_running(dev))
7499 bnx2x_set_power_state(bp, PCI_D0);
7500
7501 rtnl_unlock();
7502
7503 return PCI_ERS_RESULT_RECOVERED;
7504}
7505
7506/**
7507 * bnx2x_io_resume - called when traffic can start flowing again
7508 * @pdev: Pointer to PCI device
7509 *
7510 * This callback is called when the error recovery driver tells us that
7511 * its OK to resume normal operation.
7512 */
7513static void bnx2x_io_resume(struct pci_dev *pdev)
7514{
7515 struct net_device *dev = pci_get_drvdata(pdev);
7516 struct bnx2x *bp = netdev_priv(dev);
7517
72fd0718
VZ
7518 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7519 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7520 return;
7521 }
7522
493adb1f
WX
7523 rtnl_lock();
7524
f8ef6e44
YG
7525 bnx2x_eeh_recover(bp);
7526
493adb1f 7527 if (netif_running(dev))
f8ef6e44 7528 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
7529
7530 netif_device_attach(dev);
7531
7532 rtnl_unlock();
7533}
7534
7535static struct pci_error_handlers bnx2x_err_handler = {
7536 .error_detected = bnx2x_io_error_detected,
356e2385
EG
7537 .slot_reset = bnx2x_io_slot_reset,
7538 .resume = bnx2x_io_resume,
493adb1f
WX
7539};
7540
a2fbb9ea 7541static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
7542 .name = DRV_MODULE_NAME,
7543 .id_table = bnx2x_pci_tbl,
7544 .probe = bnx2x_init_one,
7545 .remove = __devexit_p(bnx2x_remove_one),
7546 .suspend = bnx2x_suspend,
7547 .resume = bnx2x_resume,
7548 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
7549};
7550
7551static int __init bnx2x_init(void)
7552{
dd21ca6d
SG
7553 int ret;
7554
7995c64e 7555 pr_info("%s", version);
938cf541 7556
1cf167f2
EG
7557 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7558 if (bnx2x_wq == NULL) {
7995c64e 7559 pr_err("Cannot create workqueue\n");
1cf167f2
EG
7560 return -ENOMEM;
7561 }
7562
dd21ca6d
SG
7563 ret = pci_register_driver(&bnx2x_pci_driver);
7564 if (ret) {
7995c64e 7565 pr_err("Cannot register driver\n");
dd21ca6d
SG
7566 destroy_workqueue(bnx2x_wq);
7567 }
7568 return ret;
a2fbb9ea
ET
7569}
7570
7571static void __exit bnx2x_cleanup(void)
7572{
7573 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
7574
7575 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
7576}
7577
7578module_init(bnx2x_init);
7579module_exit(bnx2x_cleanup);
7580
993ac7b5
MC
7581#ifdef BCM_CNIC
7582
7583/* count denotes the number of new completions we have seen */
7584static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7585{
7586 struct eth_spe *spe;
7587
7588#ifdef BNX2X_STOP_ON_ERROR
7589 if (unlikely(bp->panic))
7590 return;
7591#endif
7592
7593 spin_lock_bh(&bp->spq_lock);
7594 bp->cnic_spq_pending -= count;
7595
7596 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7597 bp->cnic_spq_pending++) {
7598
7599 if (!bp->cnic_kwq_pending)
7600 break;
7601
7602 spe = bnx2x_sp_get_next(bp);
7603 *spe = *bp->cnic_kwq_cons;
7604
7605 bp->cnic_kwq_pending--;
7606
7607 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7608 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7609
7610 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7611 bp->cnic_kwq_cons = bp->cnic_kwq;
7612 else
7613 bp->cnic_kwq_cons++;
7614 }
7615 bnx2x_sp_prod_update(bp);
7616 spin_unlock_bh(&bp->spq_lock);
7617}
7618
7619static int bnx2x_cnic_sp_queue(struct net_device *dev,
7620 struct kwqe_16 *kwqes[], u32 count)
7621{
7622 struct bnx2x *bp = netdev_priv(dev);
7623 int i;
7624
7625#ifdef BNX2X_STOP_ON_ERROR
7626 if (unlikely(bp->panic))
7627 return -EIO;
7628#endif
7629
7630 spin_lock_bh(&bp->spq_lock);
7631
7632 for (i = 0; i < count; i++) {
7633 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7634
7635 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7636 break;
7637
7638 *bp->cnic_kwq_prod = *spe;
7639
7640 bp->cnic_kwq_pending++;
7641
7642 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7643 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7644 spe->data.mac_config_addr.hi,
7645 spe->data.mac_config_addr.lo,
7646 bp->cnic_kwq_pending);
7647
7648 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7649 bp->cnic_kwq_prod = bp->cnic_kwq;
7650 else
7651 bp->cnic_kwq_prod++;
7652 }
7653
7654 spin_unlock_bh(&bp->spq_lock);
7655
7656 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7657 bnx2x_cnic_sp_post(bp, 0);
7658
7659 return i;
7660}
7661
7662static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7663{
7664 struct cnic_ops *c_ops;
7665 int rc = 0;
7666
7667 mutex_lock(&bp->cnic_mutex);
7668 c_ops = bp->cnic_ops;
7669 if (c_ops)
7670 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7671 mutex_unlock(&bp->cnic_mutex);
7672
7673 return rc;
7674}
7675
7676static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7677{
7678 struct cnic_ops *c_ops;
7679 int rc = 0;
7680
7681 rcu_read_lock();
7682 c_ops = rcu_dereference(bp->cnic_ops);
7683 if (c_ops)
7684 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7685 rcu_read_unlock();
7686
7687 return rc;
7688}
7689
7690/*
7691 * for commands that have no data
7692 */
9f6c9258 7693int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
7694{
7695 struct cnic_ctl_info ctl = {0};
7696
7697 ctl.cmd = cmd;
7698
7699 return bnx2x_cnic_ctl_send(bp, &ctl);
7700}
7701
7702static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7703{
7704 struct cnic_ctl_info ctl;
7705
7706 /* first we tell CNIC and only then we count this as a completion */
7707 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7708 ctl.data.comp.cid = cid;
7709
7710 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7711 bnx2x_cnic_sp_post(bp, 1);
7712}
7713
7714static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7715{
7716 struct bnx2x *bp = netdev_priv(dev);
7717 int rc = 0;
7718
7719 switch (ctl->cmd) {
7720 case DRV_CTL_CTXTBL_WR_CMD: {
7721 u32 index = ctl->data.io.offset;
7722 dma_addr_t addr = ctl->data.io.dma_addr;
7723
7724 bnx2x_ilt_wr(bp, index, addr);
7725 break;
7726 }
7727
7728 case DRV_CTL_COMPLETION_CMD: {
7729 int count = ctl->data.comp.comp_count;
7730
7731 bnx2x_cnic_sp_post(bp, count);
7732 break;
7733 }
7734
7735 /* rtnl_lock is held. */
7736 case DRV_CTL_START_L2_CMD: {
7737 u32 cli = ctl->data.ring.client_id;
7738
7739 bp->rx_mode_cl_mask |= (1 << cli);
7740 bnx2x_set_storm_rx_mode(bp);
7741 break;
7742 }
7743
7744 /* rtnl_lock is held. */
7745 case DRV_CTL_STOP_L2_CMD: {
7746 u32 cli = ctl->data.ring.client_id;
7747
7748 bp->rx_mode_cl_mask &= ~(1 << cli);
7749 bnx2x_set_storm_rx_mode(bp);
7750 break;
7751 }
7752
7753 default:
7754 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7755 rc = -EINVAL;
7756 }
7757
7758 return rc;
7759}
7760
9f6c9258 7761void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
7762{
7763 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7764
7765 if (bp->flags & USING_MSIX_FLAG) {
7766 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7767 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7768 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7769 } else {
7770 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7771 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7772 }
7773 cp->irq_arr[0].status_blk = bp->cnic_sb;
7774 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7775 cp->irq_arr[1].status_blk = bp->def_status_blk;
7776 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7777
7778 cp->num_irq = 2;
7779}
7780
7781static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7782 void *data)
7783{
7784 struct bnx2x *bp = netdev_priv(dev);
7785 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7786
7787 if (ops == NULL)
7788 return -EINVAL;
7789
7790 if (atomic_read(&bp->intr_sem) != 0)
7791 return -EBUSY;
7792
7793 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7794 if (!bp->cnic_kwq)
7795 return -ENOMEM;
7796
7797 bp->cnic_kwq_cons = bp->cnic_kwq;
7798 bp->cnic_kwq_prod = bp->cnic_kwq;
7799 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7800
7801 bp->cnic_spq_pending = 0;
7802 bp->cnic_kwq_pending = 0;
7803
7804 bp->cnic_data = data;
7805
7806 cp->num_irq = 0;
7807 cp->drv_state = CNIC_DRV_STATE_REGD;
7808
7809 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7810
7811 bnx2x_setup_cnic_irq_info(bp);
7812 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7813 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7814 rcu_assign_pointer(bp->cnic_ops, ops);
7815
7816 return 0;
7817}
7818
7819static int bnx2x_unregister_cnic(struct net_device *dev)
7820{
7821 struct bnx2x *bp = netdev_priv(dev);
7822 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7823
7824 mutex_lock(&bp->cnic_mutex);
7825 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7826 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7827 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7828 }
7829 cp->drv_state = 0;
7830 rcu_assign_pointer(bp->cnic_ops, NULL);
7831 mutex_unlock(&bp->cnic_mutex);
7832 synchronize_rcu();
7833 kfree(bp->cnic_kwq);
7834 bp->cnic_kwq = NULL;
7835
7836 return 0;
7837}
7838
7839struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
7840{
7841 struct bnx2x *bp = netdev_priv(dev);
7842 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7843
7844 cp->drv_owner = THIS_MODULE;
7845 cp->chip_id = CHIP_ID(bp);
7846 cp->pdev = bp->pdev;
7847 cp->io_base = bp->regview;
7848 cp->io_base2 = bp->doorbells;
7849 cp->max_kwqe_pending = 8;
7850 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
7851 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
7852 cp->ctx_tbl_len = CNIC_ILT_LINES;
7853 cp->starting_cid = BCM_CNIC_CID_START;
7854 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
7855 cp->drv_ctl = bnx2x_drv_ctl;
7856 cp->drv_register_cnic = bnx2x_register_cnic;
7857 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
7858
7859 return cp;
7860}
7861EXPORT_SYMBOL(bnx2x_cnic_probe);
7862
7863#endif /* BCM_CNIC */
94a78b79 7864