/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
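
/*
 * Illustrative note: __stringify() turns the numeric BCM_5710_FW_*
 * version macros into string literals, which the preprocessor then
 * concatenates with the prefix and the ".fw" suffix.  Assuming a
 * purely hypothetical 5.2.13.0 firmware, these would expand to:
 *
 *	FW_FILE_NAME_E1   ->  "bnx2x-e1-5.2.13.0.fw"
 *	FW_FILE_NAME_E1H  ->  "bnx2x-e1h-5.2.13.0.fw"
 */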

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
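
/*
 * Worked example for the chunking loop above (values hypothetical):
 * with dmae_wr_max = 0x400 dwords and len = 0x500 dwords, the first
 * bnx2x_write_dmae() call moves 0x400 dwords, offset advances by
 * 0x400 * 4 bytes on both the DMA and GRC sides, and the tail call
 * writes the remaining 0x100 dwords.  Note that offset is counted in
 * bytes while len counts 32-bit words.
 */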

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
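
/*
 * Informal reading of the two loops above: the MCP scratchpad appears
 * to be used as a circular text buffer with "mark" as the current
 * write cursor, so the dump prints the older half first (from mark up
 * to shmem_base) and then the newer half (from the start of the
 * buffer up to mark) to reassemble the log in chronological order.
 */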

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
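
/*
 * Informal summary of the mode selection above: MSI-X clears
 * SINGLE_ISR and INT_LINE and leaves only the MSI/MSI-X and attention
 * enables; MSI keeps SINGLE_ISR set alongside them; legacy INTx first
 * enables every source, then drops MSI_MSIX_INT_EN before the final
 * write so that only the line interrupt and attention bits remain
 * armed.
 */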

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
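
/*
 * Informal note on the scheme above: writing the resource bit to
 * hw_lock_control_reg + 4 requests the lock, and reading the control
 * register back shows which bits were actually granted, so the
 * write-then-read pair forms the whole atomic try-lock.  A typical
 * caller (resource chosen for illustration) might look like:
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_SPIO)) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	}
 */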


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
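
/*
 * Informal note on the dispatch style above: the ramrod command codes
 * and the BNX2X_FP_STATE_ / BNX2X_STATE_ values appear to occupy
 * disjoint bit ranges, so OR-ing them produces a unique case label per
 * (command, state) pair and lets a single switch verify that each
 * completion arrived in the state that expected it.
 */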

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
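
/*
 * Rough worked example for the math above (values illustrative): at
 * line_speed = 10000 Mbps, r_param = 10000 / 8 = 1250 bytes per usec,
 * and t_fair = T_FAIR_COEF / 10000 yields the 1000 usec fairness
 * period mentioned in the comment; at 1000 Mbps the same formula
 * yields 10000 usec.  The multiply-by-5/4 in rs_threshold is the 1.25
 * slack coefficient.
 */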

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
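
/*
 * Illustrative example of the normalization above (numbers made up):
 * with four vns configured to min rates 0, 2500, 0 and 7500 (after
 * the *100 scaling), the two zero entries are bumped to DEF_MIN_RATE,
 * all_zero stays 0, and vn_weight_sum becomes the total later used as
 * the fairness denominator.  Only when every visible vn reports 0
 * does the port disable the fairness algorithm altogether.
 */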

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
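
/*
 * Informal sketch of the mailbox handshake implemented above: the
 * driver stamps each request with an incrementing sequence number in
 * the low bits of drv_mb_header, and the MCP echoes that sequence in
 * fw_mb_header once it has processed the command.  Matching against
 * FW_MSG_SEQ_NUMBER_MASK therefore pairs replies with requests, while
 * the remaining FW_MSG_CODE_MASK bits carry the response code, e.g.:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 */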

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
1723
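/*
 * Note (added commentary): each handled DCC sub-event is cleared from
 * dcc_event above, so a non-zero value at this point means an event the
 * driver does not understand; reporting DCC_FAILURE lets the MCP know
 * the request was not honoured.
 */
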
/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
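
/*
 * Example (illustrative only, not taken from this file): posting a
 * common set-MAC ramrod for connection 0 looks roughly like
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *			   U64_HI(config_mapping), U64_LO(config_mapping), 1);
 *
 * where config_mapping is a hypothetical DMA address of the command
 * data; the completion for the element then shows up on the leading
 * fastpath CQ ring, hence the comment above the function.
 */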

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
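
/*
 * Note (added commentary): the ALR is a hardware try-lock at
 * GRCBASE_MCP + 0x9c shared with the MCP firmware; writing bit 31
 * requests the lock and reading the bit back set confirms ownership,
 * which is why acquisition is a bounded retry loop rather than a plain
 * register write.
 */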

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
		   " the driver to shutdown the card to prevent permanent"
		   " damage. Please contact OEM Support for assistance\n");
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (bp->link_params.phy[EXT_PHY1].type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & RESET_DONE_FLAG_MASK) ? false : true;
}

/*
 * should be run under rtnl lock
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}

/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}

static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
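
/*
 * Note (added commentary): MISC_REG_GENERIC_POR_1 is used here as shared
 * recovery state between the functions on the chip: bits 0-15 count how
 * many functions currently have the device loaded and the bit at
 * RESET_DONE_FLAG_SHIFT is set while a global recovery reset is in
 * progress.  The last function to decrement the counter is the one that
 * may perform the reset.
 */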

static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}

static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
				_print_next_block(par_num++, "BRB");
				break;
			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
				_print_next_block(par_num++, "PARSER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
				_print_next_block(par_num++, "TSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
				_print_next_block(par_num++, "SEARCHER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "TSEMI");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				_print_next_block(par_num++, "MCP ROM");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				_print_next_block(par_num++, "MCP UMP RX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				_print_next_block(par_num++, "MCP UMP TX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				_print_next_block(par_num++, "MCP SCPAD");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}

bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
				 attn.sig[3]);
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other functions will also "see" the parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
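
/*
 * Note (added commentary): the sanity check above flags bits where the
 * hardware state and the ack agree (attn_bits == attn_ack) while the
 * driver's recorded state disagrees (attn_bits != attn_state), a
 * combination that cannot result from a legal assert/deassert sequence
 * and therefore indicates a missed transition.
 */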

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
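
/*
 * Heartbeat example (added commentary; the mask width is an assumption):
 * the driver bumps drv_pulse on every tick and the MCP echoes the last
 * value it saw, so a drv_pulse of 0x0005 is healthy against an
 * mcp_pulse of 0x0005 or 0x0004 (modulo the sequence mask); any other
 * combination means one side stopped echoing and the error above fires.
 */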

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
		   dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
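
/*
 * Note (added commentary): as the BAR_CSTRORM_INTMEM writes above show,
 * both the USTORM and CSTORM halves of a fastpath status block are
 * hosted in CSTORM internal memory on these chips.  Each HC_DISABLE
 * write starts an index in "disabled" state; bnx2x_update_coalesce()
 * later re-enables the Rx/Tx CQ indices with real timeout values.
 */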

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
						reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
						reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
						reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

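/*
 * Note (added commentary): the HC timeout is programmed in units of
 * (4 * BNX2X_BTR) host-coalescing ticks, so an rx_ticks/tx_ticks value
 * that divides down to zero cannot be expressed; in that case the index
 * is instead marked disabled (the HC_DISABLE write above), which turns
 * interrupt coalescing off for that ring.
 */
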
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >>
						PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
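
/*
 * Example (added commentary): the table simply round-robins client IDs,
 * so with a hypothetical num_queues of 4 and a leading cl_id of 0 the
 * entries read 0, 1, 2, 3, 0, 1, ... across all
 * TSTORM_INDIRECTION_TABLE_SIZE slots; the RSS hash of an incoming
 * packet indexes this table to pick the receive queue.
 */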

void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
3384
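/*
 * Internal-memory init dispatch.  The switch below falls through on
 * purpose: a COMMON load also performs the PORT and FUNCTION stages,
 * and a PORT load also performs the FUNCTION stage.
 */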
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

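/*
 * bnx2x_nic_init: per-queue status block setup followed by the default
 * status block, coalescing, RX/TX/slowpath rings, internal memory and
 * the indirection table.  Interrupts are enabled only after everything
 * else is ready, and SPIO5 (fan failure) is checked right afterwards.
 */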
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

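/*
 * The firmware blobs are gzip members (RFC 1952): magic bytes 0x1f 0x8b,
 * then the compression method (8 = deflate).  When the FNAME flag (bit 3
 * of the flags byte) is set, a NUL-terminated original file name follows
 * the 10-byte header and must be skipped before the raw deflate stream
 * is handed to zlib_inflate() (hence the -MAX_WBITS window bits below).
 */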
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
			   " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable
 * from the driver; to test them we send debug packets.
 */
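/*
 * Rough test sequence: with the parser's CFC-search credits forced to
 * zero, a single loopback packet must show up in the NIG byte counter
 * (0x10 bytes) and in the parser packet counter; the blocks are reset
 * and the test repeats with ten packets, after which the credits are
 * restored and the NIG EOP FIFO is drained and checked for emptiness.
 */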
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
}

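/*
 * Parity attention masks written by enable_blocks_parity().  A mask of
 * zero leaves all parity sources of that block enabled; set bits mask
 * (ignore) the corresponding sources, as the per-entry comments note.
 */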
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bits 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bits 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

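/*
 * The PXP read/write ordering is derived from the PCIe Device Control
 * register: bits 7:5 encode Max_Payload_Size and bits 14:12 encode
 * Max_Read_Request_Size, which is why devctl is shifted down by 5 and
 * 12 respectively below.
 */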
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type
	 * since the power consumption of the board is affected by the PHY.
	 * Currently, a fan is required for most designs with the SFX7101,
	 * BCM8727 and BCM8481 PHYs.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
			  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->link_params.phy[EXT_PHY1].type) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (bp->link_params.phy[EXT_PHY1].type) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		bp->port.need_hw_lock = 1;
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		/* fall through */
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		bp->port.need_hw_lock = 1;
		break;
	default:
		break;
	}
	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1=valid bit
   added at the 53rd bit; since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
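
/*
 * Worked example (hypothetical address): for addr = 0x123456000,
 * ONCHIP_ADDR1() yields the page number 0x123456 (addr >> 12) and
 * ONCHIP_ADDR2() yields 0x100000, i.e. just the valid bit (1 << 20),
 * since addr >> 44 is zero for such a small address.
 */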

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);
	bnx2x_phy_probe(&bp->link_params);
	return 0;
}

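/*
 * HW init dispatch by the MCP load response.  Like bnx2x_init_internal()
 * the cases fall through: the first driver to load runs the COMMON, PORT
 * and FUNCTION stages, the first driver on a port runs PORT and FUNCTION,
 * and every other load runs only the FUNCTION stage.
 */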
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

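/*
 * Every allocation below goes through BNX2X_PCI_ALLOC/BNX2X_ALLOC so
 * that any failure jumps to alloc_mem_err, where bnx2x_free_mem()
 * unwinds whatever was already allocated; the free macros check each
 * pointer and NULL it after freeing, so a partial unwind is safe.
 */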
int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate the searcher T2 table: 1/4 of the T1 allocation
	   (T2 is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

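/*
 * Ramrod completions normally arrive through the slowpath interrupt,
 * which updates *state_p from bnx2x_sp_event(); in poll mode (used when
 * interrupts are not serviced) the RX rings are processed directly here
 * so that the completion can be reaped.
 */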
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Sets the iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if the ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

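/*
 * Editor's note -- illustrative arithmetic (values hypothetical): with
 * multi_mode=1 on an 8-CPU host and a chip limit of 4 queues, the clamp
 * above yields min(8, 4) = 4 RSS queues; loading with num_queues=2
 * yields min(2, 4) = 2.
 */
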
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty, %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {
			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
}

void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp Driver handle.
 * @param magic_val Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val Old value of the 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

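/*
 * Editor's note -- illustrative arithmetic: bnx2x_reset_mcp_comp()
 * below retries up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50
 * times. On ASIC each retry sleeps 100 ms (~5 s total); on emulation
 * or FPGA each retry sleeps 1 s, allowing the MCP up to ~50 s.
 */
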
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check the validity map of the last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up!!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}

static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}

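/*
 * Editor's note -- illustrative arithmetic (values hypothetical): the
 * CLEAR registers above are written with reset_mask & ~not_reset_mask,
 * i.e. every block is put into reset except those that must survive.
 * For instance, 0xffff & ~0x0163 = 0xfe9c would leave the blocks on
 * bits 0, 1, 5, 6 and 8 untouched.
 */
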
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare for chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}

static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all
						 * functions of this ASIC
						 * until "process kill" or
						 * power cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released leadership for another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}

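/*
 * Editor's summary of the recovery state machine above (illustrative):
 *
 *	BNX2X_RECOVERY_INIT -> BNX2X_RECOVERY_WAIT -> BNX2X_RECOVERY_DONE
 *
 * INIT: try to take the leader lock, unload the NIC, move to WAIT.
 * WAIT, leader: once the ASIC-wide load count drops to zero, run
 *	"process kill" via bnx2x_leader_reset() and reload.
 * WAIT, non-leader: inherit leadership if the lock has been released,
 *	or reload once the leader has marked the reset done.
 * Blocked WAIT paths reschedule bnx2x_reset_task() every HZ/10.
 */
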
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}

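/*
 * Editor's note -- illustrative layout: chip_id packs four register
 * reads into one word as documented above -- bits 31:16 chip number,
 * 15:12 revision, 11:4 metal, 3:0 bond id. E.g. num 0x164e, rev 0x0,
 * metal 0x00 and bond 0x0 (hypothetical values) compose to
 * (0x164e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0 = 0x164e0000.
 */
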
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);

	bp->port.supported = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported = bp->link_params.phy[INT_PHY].supported;
		break;
	case 2:
		bp->port.supported = bp->link_params.phy[EXT_PHY1].supported;
		break;
	}

	if (!(bp->port.supported)) {
		BNX2X_ERR("NVRAM config error. BAD phy config. "
			  "PHY1 config 0x%x\n",
			  SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			/* force 10G, no AN */
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERROR("NVRAM config error. "
				    "Invalid link_config 0x%x"
				    "  speed_cap_mask 0x%x\n",
				    bp->port.link_config,
				    bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERROR("NVRAM config error. "
			    "BAD link speed link_config 0x%x\n",
			    bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

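/*
 * Editor's note -- illustrative example (hypothetical values): shmem
 * stores a MAC as a 16-bit "upper" and a 32-bit "lower" word in CPU
 * order. With mac_hi = 0x0010 and mac_lo = 0x18305070, the helper
 * above swaps each word to big-endian and concatenates them, leaving
 * 00:10:18:30:50:70 in mac_buf.
 */
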
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

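/*
 * Editor's note -- the VPD walk above follows the standard PCI layout:
 * locate the read-only LRDT tag, bound the RO section by its encoded
 * size, then look up the "MN" (manufacturer id) and "V0" vendor
 * keywords inside it. Only when the manufacturer id matches Dell's
 * vendor id is the V0 payload copied into bp->fw_ver.
 */
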
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mailbox sequence if there is an ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, it may mean that the previous
			 * recovery never completed. We don't check the
			 * attention state here because it may have already
			 * been cleared by a "common" reset but we shall
			 * proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

9f6c9258 6659void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
6660{
6661 struct bnx2x *bp = netdev_priv(dev);
6662 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6663 int port = BP_PORT(bp);
6664
6665 if (bp->state != BNX2X_STATE_OPEN) {
6666 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6667 return;
6668 }
6669
6670 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6671
6672 if (dev->flags & IFF_PROMISC)
6673 rx_mode = BNX2X_RX_MODE_PROMISC;
6674
6675 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
6676 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6677 CHIP_IS_E1(bp)))
34f80b04
EG
6678 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6679
6680 else { /* some multicasts */
6681 if (CHIP_IS_E1(bp)) {
6682 int i, old, offset;
22bedad3 6683 struct netdev_hw_addr *ha;
34f80b04
EG
6684 struct mac_configuration_cmd *config =
6685 bnx2x_sp(bp, mcast_config);
6686
0ddf477b 6687 i = 0;
22bedad3 6688 netdev_for_each_mc_addr(ha, dev) {
34f80b04
EG
6689 config->config_table[i].
6690 cam_entry.msb_mac_addr =
22bedad3 6691 swab16(*(u16 *)&ha->addr[0]);
34f80b04
EG
6692 config->config_table[i].
6693 cam_entry.middle_mac_addr =
22bedad3 6694 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
6695 config->config_table[i].
6696 cam_entry.lsb_mac_addr =
22bedad3 6697 swab16(*(u16 *)&ha->addr[4]);
34f80b04
EG
6698 config->config_table[i].cam_entry.flags =
6699 cpu_to_le16(port);
6700 config->config_table[i].
6701 target_table_entry.flags = 0;
ca00392c
EG
6702 config->config_table[i].target_table_entry.
6703 clients_bit_vector =
6704 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
6705 config->config_table[i].
6706 target_table_entry.vlan_id = 0;
6707
6708 DP(NETIF_MSG_IFUP,
6709 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6710 config->config_table[i].
6711 cam_entry.msb_mac_addr,
6712 config->config_table[i].
6713 cam_entry.middle_mac_addr,
6714 config->config_table[i].
6715 cam_entry.lsb_mac_addr);
0ddf477b 6716 i++;
34f80b04 6717 }
8d9c5f34 6718 old = config->hdr.length;
34f80b04
EG
6719 if (old > i) {
6720 for (; i < old; i++) {
6721 if (CAM_IS_INVALID(config->
6722 config_table[i])) {
af246401 6723 /* already invalidated */
34f80b04
EG
6724 break;
6725 }
6726 /* invalidate */
6727 CAM_INVALIDATE(config->
6728 config_table[i]);
6729 }
6730 }
6731
6732 if (CHIP_REV_IS_SLOW(bp))
6733 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6734 else
6735 offset = BNX2X_MAX_MULTICAST*(1 + port);
6736
8d9c5f34 6737 config->hdr.length = i;
34f80b04 6738 config->hdr.offset = offset;
8d9c5f34 6739 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6740 config->hdr.reserved1 = 0;
6741
e665bfda
MC
6742 bp->set_mac_pending++;
6743 smp_wmb();
6744
34f80b04
EG
6745 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6746 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6747 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6748 0);
6749 } else { /* E1H */
6750 /* Accept one or more multicasts */
22bedad3 6751 struct netdev_hw_addr *ha;
34f80b04
EG
6752 u32 mc_filter[MC_HASH_SIZE];
6753 u32 crc, bit, regidx;
6754 int i;
6755
6756 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6757
22bedad3 6758 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 6759 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 6760 ha->addr);
34f80b04 6761
22bedad3 6762 crc = crc32c_le(0, ha->addr, ETH_ALEN);
34f80b04
EG
6763 bit = (crc >> 24) & 0xff;
6764 regidx = bit >> 5;
6765 bit &= 0x1f;
6766 mc_filter[regidx] |= (1 << bit);
6767 }
6768
6769 for (i = 0; i < MC_HASH_SIZE; i++)
6770 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6771 mc_filter[i]);
6772 }
6773 }
6774
6775 bp->rx_mode = rx_mode;
6776 bnx2x_set_storm_rx_mode(bp);
6777}
6778
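/*
 * Editor's note -- illustrative sketch of the E1H multicast filter
 * above (hypothetical helper, not part of the driver): the top byte of
 * the little-endian CRC32C of the address selects one of 256 bits
 * spread across the eight 32-bit MC_HASH registers.
 */
#if 0
static void bnx2x_mc_hash_bit(const u8 *addr, u32 *regidx, u32 *bit)
{
	u32 crc = crc32c_le(0, addr, ETH_ALEN);
	u32 b = (crc >> 24) & 0xff;	/* top CRC byte: 0..255 */

	*regidx = b >> 5;		/* which of the 8 registers */
	*bit = b & 0x1f;		/* which of its 32 bits */
}
#endif
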
a2fbb9ea 6779
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
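/*
 * Netpoll (e.g. netconsole) entry point: poll the device by running the
 * interrupt handler with the IRQ line disabled.
 */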
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}
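
	/*
	 * pdev->enable_cnt counts nested pci_enable_device() calls, so
	 * only claim the regions and save config state on the first
	 * enable of this function.
	 */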
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}
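
	/*
	 * Prefer a 64-bit DMA mask; USING_DAC_FLAG is checked again below
	 * when deciding whether to advertise NETIF_F_HIGHDMA.
	 */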
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
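
/*
 * The firmware file stores all multi-byte fields big-endian; the helpers
 * below convert each init array to host byte order once at load time.
 */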

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * The ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
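
/*
 * Note: the macro expands against 'bp' and 'fw_hdr' in the caller's
 * scope, so it is only meant for use inside bnx2x_init_firmware().
 */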

int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
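
/*
 * EEH teardown: unlike the regular unload path this skips the ramrod/MCP
 * shutdown handshake (the PCI channel may already be dead) and only
 * releases driver-side resources.
 */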
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC
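/*
 * CNIC submits 16-byte kwqes into a driver-side staging ring (cnic_kwq);
 * they are drained into the slow-path queue as completions make room,
 * bounded by max_kwqe_pending.
 */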
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
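
/*
 * Entry point for CNIC to submit kwqes; returns how many of 'count' were
 * accepted (the staging ring is capped at MAX_SP_DESC_CNT entries).
 */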
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
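
/*
 * Two flavours of the same notification: the plain variant takes
 * cnic_mutex (process context), while the _bh variant uses RCU and is
 * safe from bottom-half context.
 */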
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
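
/*
 * Control hook exported to CNIC (cp->drv_ctl): context-table writes,
 * completion accounting and L2 client ring start/stop.
 */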
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
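
/*
 * CNIC gets the second MSI-X vector (msix_table[1]) and its own status
 * block; irq_arr[1] describes the default status block.
 */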
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
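
/*
 * Called by CNIC through cp->drv_register_cnic: allocates a page for the
 * kwq staging ring, publishes 'ops' via RCU and sets the iSCSI MAC.
 */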
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */