/*
 * Source: net-next-2.6.git — drivers/net/bnx2x/bnx2x_main.c
 * (blame view; commit subject: "bnx2x: Fix potential link issue In
 *  BCM8727 based boards")
 */
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
3359fced 3 * Copyright (c) 2007-2010 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea 51#include <linux/io.h>
45229b42 52#include <linux/stringify.h>
a2fbb9ea 53
b0efbb99 54#define BNX2X_MAIN
a2fbb9ea
ET
55#include "bnx2x.h"
56#include "bnx2x_init.h"
94a78b79 57#include "bnx2x_init_ops.h"
9f6c9258 58#include "bnx2x_cmn.h"
a2fbb9ea 59
a2fbb9ea 60
94a78b79
VZ
61#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h"
63/* FW files */
45229b42
BH
64#define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
94a78b79 71
34f80b04
EG
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

/* Module parameters — all read-only after load (perm 0). */

/* 0 = single queue, 1 = multi queue (RSS); default on */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* 0 means "use one queue per CPU" */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* Force a non-default interrupt mode (default is MSI-X when available) */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* -1 leaves the PCIe Max Read Request Size untouched */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* Single-threaded workqueue used for the slowpath task */
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
/* Write a 32-bit value to a GRC register through the PCI config-space
 * indirection window (GRC address/data registers in config space).
 * Used only at init; locking is done by the MCP.  The address register
 * is restored to PCICFG_VENDOR_ID_OFFSET afterwards so subsequent
 * ordinary config-space accesses are not redirected.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
162
/* Read a 32-bit GRC register through the PCI config-space indirection
 * window.  Counterpart of bnx2x_reg_wr_ind(); restores the address
 * register to PCICFG_VENDOR_ID_OFFSET before returning.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
a2fbb9ea 174
/* "go" registers for the 16 DMAE command slots, indexed by slot */
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	/* Command memory holds sizeof(struct dmae_command) bytes per slot;
	 * copy the command in one dword at a time.
	 */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* kick the command slot */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
197
/* DMA len32 dwords from host memory (dma_addr) to GRC address dst_addr
 * using the DMAE block.  Falls back to indirect register writes when the
 * DMAE is not yet ready (early init / after HW error).  Serialized by
 * bp->dmae_mutex; completion is detected by polling the wb_comp word in
 * the slowpath area, which the DMAE writes with DMAE_COMP_VAL when done.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC transfer; completion written back to host (PCI),
	 * endianness swap selected at compile time.
	 */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	/* clear the completion word before posting the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
271
/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * area using the DMAE block.  Mirror image of bnx2x_write_dmae(): falls
 * back to indirect register reads when the DMAE is not ready, is
 * serialized by bp->dmae_mutex, and polls wb_comp for DMAE_COMP_VAL.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI transfer; completion written back to host (PCI) */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	/* clear destination and completion word before posting */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
346
573f2035
EG
347void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
348 u32 addr, u32 len)
349{
02e3c6cb 350 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
573f2035
EG
351 int offset = 0;
352
02e3c6cb 353 while (len > dmae_wr_max) {
573f2035 354 bnx2x_write_dmae(bp, phys_addr + offset,
02e3c6cb
VZ
355 addr + offset, dmae_wr_max);
356 offset += dmae_wr_max * 4;
357 len -= dmae_wr_max;
573f2035
EG
358 }
359
360 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
361}
362
ad8d3948
EG
363/* used only for slowpath so not inlined */
364static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
365{
366 u32 wb_write[2];
367
368 wb_write[0] = val_hi;
369 wb_write[1] = val_lo;
370 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 371}
a2fbb9ea 372
#ifdef USE_WB_RD
/* 64-bit wide-bus read via DMAE; compiled only when USE_WB_RD is set */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	/* wb_data[0] is the high dword, wb_data[1] the low */
	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
383
/* Scan the assert lists of the four storm processors (X/T/C/U) and log
 * every valid entry.  Each entry is four consecutive dwords; a row0 of
 * COMMON_ASM_INVALID_ASSERT_OPCODE terminates the scan for that storm.
 * Returns the total number of asserts found.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
c14423fe 504
/* Dump the MCP firmware's circular trace buffer to the kernel log.
 * The dword just below shmem_base holds a "mark" pointer into the MCP
 * scratchpad; the buffer is printed in two halves (mark..shmem_base,
 * then start..mark) to restore chronological order.  Text is read 8
 * dwords at a time and byte-swapped to host order with htonl().
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];	/* 8 dwords of text + NUL terminator */
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* mark lives 0x800 below shmem_base, +4; translate it into a
	 * GRC address inside the MCP scratchpad (aligned up to 4)
	 */
	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;	/* guarantee NUL termination */
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
537
/* Dump driver and HW state for post-mortem debugging: global indices,
 * per-queue Rx/Tx indices, ring contents around the current consumers,
 * the MCP firmware trace, and any storm asserts.  Statistics are
 * disabled first so the dump is not perturbed.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window around the current Rx BD consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		/* dump a window around the current completion consumer */
		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
650
/* Enable interrupts in the HC (host coalescing) block for the current
 * port, configuring it for MSI-X, MSI or INTx according to bp->flags,
 * and program the leading/trailing attention edge registers on E1H.
 */
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: per-SB interrupts, no single-ISR, no INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: first enable everything ... */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		/* ... then clear the MSI/MSI-X enable for the final write */
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
710
/* Disable all interrupt sources in the HC block for the current port
 * and verify that the write actually took effect.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to confirm the disable reached the HW */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
732
/* Synchronously disable interrupt processing: raise intr_sem so the
 * ISRs bail out, optionally disable interrupts at the HW, wait for all
 * in-flight ISRs to finish, and make sure the slowpath task is idle.
 * @disable_hw: when non-zero, also mask interrupts in the HC block.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the default/slowpath SB */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;	/* CNIC claims one extra vector */
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
762
34f80b04 763/* fast path */
a2fbb9ea
ET
764
765/*
34f80b04 766 * General service functions
a2fbb9ea
ET
767 */
768
72fd0718
VZ
769/* Return true if succeeded to acquire the lock */
770static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
771{
772 u32 lock_status;
773 u32 resource_bit = (1 << resource);
774 int func = BP_FUNC(bp);
775 u32 hw_lock_control_reg;
776
777 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
778
779 /* Validating that the resource is within range */
780 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
781 DP(NETIF_MSG_HW,
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
0fdf4d09 784 return false;
72fd0718
VZ
785 }
786
787 if (func <= 5)
788 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
789 else
790 hw_lock_control_reg =
791 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
792
793 /* Try to acquire the lock */
794 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795 lock_status = REG_RD(bp, hw_lock_control_reg);
796 if (lock_status & resource_bit)
797 return true;
798
799 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
800 return false;
801}
802
a2fbb9ea 803
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

/* Handle a slowpath (ramrod completion) CQE received on a fastpath
 * ring: advance the per-queue or global state machine according to the
 * completed command, and release the SPQ slot it occupied.
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* the completed ramrod frees one SPQ slot */
	bp->spq_left++;

	/* non-leading queues track only their own open/halt transitions */
	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: completions drive the global device state */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
891
/* INTx/MSI interrupt handler.  Acks the interrupt, then dispatches the
 * per-status-block bits: fastpath SBs get NAPI scheduled, the CNIC SB
 * (if compiled in) is forwarded to the CNIC driver, and bit 0 (the
 * default/slowpath SB) queues the slowpath task.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each fastpath SB owns one bit, starting at bit 1 */
		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		/* default SB: run the slowpath task */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
963
c18487ee 964/* end of fast path */
a2fbb9ea 965
a2fbb9ea 966
c18487ee
YR
967/* Link */
968
969/*
970 * General service functions
971 */
a2fbb9ea 972
9f6c9258 973int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
974{
975 u32 lock_status;
976 u32 resource_bit = (1 << resource);
4a37fb66
YG
977 int func = BP_FUNC(bp);
978 u32 hw_lock_control_reg;
c18487ee 979 int cnt;
a2fbb9ea 980
c18487ee
YR
981 /* Validating that the resource is within range */
982 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
983 DP(NETIF_MSG_HW,
984 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985 resource, HW_LOCK_MAX_RESOURCE_VALUE);
986 return -EINVAL;
987 }
a2fbb9ea 988
4a37fb66
YG
989 if (func <= 5) {
990 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
991 } else {
992 hw_lock_control_reg =
993 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
994 }
995
c18487ee 996 /* Validating that the resource is not already taken */
4a37fb66 997 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
998 if (lock_status & resource_bit) {
999 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1000 lock_status, resource_bit);
1001 return -EEXIST;
1002 }
a2fbb9ea 1003
46230476
EG
1004 /* Try for 5 second every 5ms */
1005 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1006 /* Try to acquire the lock */
4a37fb66
YG
1007 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1009 if (lock_status & resource_bit)
1010 return 0;
a2fbb9ea 1011
c18487ee 1012 msleep(5);
a2fbb9ea 1013 }
c18487ee
YR
1014 DP(NETIF_MSG_HW, "Timeout\n");
1015 return -EAGAIN;
1016}
a2fbb9ea 1017
9f6c9258 1018int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1019{
1020 u32 lock_status;
1021 u32 resource_bit = (1 << resource);
4a37fb66
YG
1022 int func = BP_FUNC(bp);
1023 u32 hw_lock_control_reg;
a2fbb9ea 1024
72fd0718
VZ
1025 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1026
c18487ee
YR
1027 /* Validating that the resource is within range */
1028 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1029 DP(NETIF_MSG_HW,
1030 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1032 return -EINVAL;
1033 }
1034
4a37fb66
YG
1035 if (func <= 5) {
1036 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1037 } else {
1038 hw_lock_control_reg =
1039 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1040 }
1041
c18487ee 1042 /* Validating that the resource is currently taken */
4a37fb66 1043 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1044 if (!(lock_status & resource_bit)) {
1045 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1046 lock_status, resource_bit);
1047 return -EFAULT;
a2fbb9ea
ET
1048 }
1049
9f6c9258
DK
1050 REG_WR(bp, hw_lock_control_reg, resource_bit);
1051 return 0;
c18487ee 1052}
a2fbb9ea 1053
9f6c9258 1054
4acac6a5
EG
1055int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1056{
1057 /* The GPIO should be swapped if swap register is set and active */
1058 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060 int gpio_shift = gpio_num +
1061 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062 u32 gpio_mask = (1 << gpio_shift);
1063 u32 gpio_reg;
1064 int value;
1065
1066 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1068 return -EINVAL;
1069 }
1070
1071 /* read GPIO value */
1072 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1073
1074 /* get the requested pin value */
1075 if ((gpio_reg & gpio_mask) == gpio_mask)
1076 value = 1;
1077 else
1078 value = 0;
1079
1080 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1081
1082 return value;
1083}
1084
17de50b7 1085int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1086{
1087 /* The GPIO should be swapped if swap register is set and active */
1088 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1089 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1090 int gpio_shift = gpio_num +
1091 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1092 u32 gpio_mask = (1 << gpio_shift);
1093 u32 gpio_reg;
a2fbb9ea 1094
c18487ee
YR
1095 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1096 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1097 return -EINVAL;
1098 }
a2fbb9ea 1099
4a37fb66 1100 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1101 /* read GPIO and mask except the float bits */
1102 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1103
c18487ee
YR
1104 switch (mode) {
1105 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1106 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1107 gpio_num, gpio_shift);
1108 /* clear FLOAT and set CLR */
1109 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1110 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1111 break;
a2fbb9ea 1112
c18487ee
YR
1113 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1114 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1115 gpio_num, gpio_shift);
1116 /* clear FLOAT and set SET */
1117 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1118 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1119 break;
a2fbb9ea 1120
17de50b7 1121 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1122 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1123 gpio_num, gpio_shift);
1124 /* set FLOAT */
1125 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1126 break;
a2fbb9ea 1127
c18487ee
YR
1128 default:
1129 break;
a2fbb9ea
ET
1130 }
1131
c18487ee 1132 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1133 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1134
c18487ee 1135 return 0;
a2fbb9ea
ET
1136}
1137
4acac6a5
EG
1138int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1139{
1140 /* The GPIO should be swapped if swap register is set and active */
1141 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1142 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1143 int gpio_shift = gpio_num +
1144 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1145 u32 gpio_mask = (1 << gpio_shift);
1146 u32 gpio_reg;
1147
1148 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1149 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1150 return -EINVAL;
1151 }
1152
1153 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1154 /* read GPIO int */
1155 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1156
1157 switch (mode) {
1158 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1159 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1160 "output low\n", gpio_num, gpio_shift);
1161 /* clear SET and set CLR */
1162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1164 break;
1165
1166 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1167 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1168 "output high\n", gpio_num, gpio_shift);
1169 /* clear CLR and set SET */
1170 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1172 break;
1173
1174 default:
1175 break;
1176 }
1177
1178 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1179 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1180
1181 return 0;
1182}
1183
c18487ee 1184static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1185{
c18487ee
YR
1186 u32 spio_mask = (1 << spio_num);
1187 u32 spio_reg;
a2fbb9ea 1188
c18487ee
YR
1189 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1190 (spio_num > MISC_REGISTERS_SPIO_7)) {
1191 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1192 return -EINVAL;
a2fbb9ea
ET
1193 }
1194
4a37fb66 1195 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1196 /* read SPIO and mask except the float bits */
1197 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1198
c18487ee 1199 switch (mode) {
6378c025 1200 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1201 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1202 /* clear FLOAT and set CLR */
1203 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1204 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1205 break;
a2fbb9ea 1206
6378c025 1207 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1208 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1209 /* clear FLOAT and set SET */
1210 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1211 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1212 break;
a2fbb9ea 1213
c18487ee
YR
1214 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1215 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1216 /* set FLOAT */
1217 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1218 break;
a2fbb9ea 1219
c18487ee
YR
1220 default:
1221 break;
a2fbb9ea
ET
1222 }
1223
c18487ee 1224 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1225 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1226
a2fbb9ea
ET
1227 return 0;
1228}
1229
9f6c9258 1230void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1231{
ad33ea3a
EG
1232 switch (bp->link_vars.ieee_fc &
1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1234 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1235 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1236 ADVERTISED_Pause);
1237 break;
356e2385 1238
c18487ee 1239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1240 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1241 ADVERTISED_Pause);
1242 break;
356e2385 1243
c18487ee 1244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1245 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 1246 break;
356e2385 1247
c18487ee 1248 default:
34f80b04 1249 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1250 ADVERTISED_Pause);
1251 break;
1252 }
1253}
f1410647 1254
c18487ee 1255
9f6c9258 1256u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1257{
19680c48
EG
1258 if (!BP_NOMCP(bp)) {
1259 u8 rc;
a2fbb9ea 1260
19680c48 1261 /* Initialize link parameters structure variables */
8c99e7b0
YR
1262 /* It is recommended to turn off RX FC for jumbo frames
1263 for better performance */
0c593270 1264 if (bp->dev->mtu > 5000)
c0700f90 1265 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1266 else
c0700f90 1267 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1268
4a37fb66 1269 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
1270
1271 if (load_mode == LOAD_DIAG)
1272 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1273
19680c48 1274 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1275
4a37fb66 1276 bnx2x_release_phy_lock(bp);
a2fbb9ea 1277
3c96c68b
EG
1278 bnx2x_calc_fc_adv(bp);
1279
b5bf9068
EG
1280 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1281 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1282 bnx2x_link_report(bp);
b5bf9068 1283 }
34f80b04 1284
19680c48
EG
1285 return rc;
1286 }
f5372251 1287 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1288 return -EINVAL;
a2fbb9ea
ET
1289}
1290
9f6c9258 1291void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1292{
19680c48 1293 if (!BP_NOMCP(bp)) {
4a37fb66 1294 bnx2x_acquire_phy_lock(bp);
19680c48 1295 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1296 bnx2x_release_phy_lock(bp);
a2fbb9ea 1297
19680c48
EG
1298 bnx2x_calc_fc_adv(bp);
1299 } else
f5372251 1300 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1301}
a2fbb9ea 1302
c18487ee
YR
1303static void bnx2x__link_reset(struct bnx2x *bp)
1304{
19680c48 1305 if (!BP_NOMCP(bp)) {
4a37fb66 1306 bnx2x_acquire_phy_lock(bp);
589abe3a 1307 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1308 bnx2x_release_phy_lock(bp);
19680c48 1309 } else
f5372251 1310 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1311}
a2fbb9ea 1312
9f6c9258 1313u8 bnx2x_link_test(struct bnx2x *bp)
c18487ee 1314{
2145a920 1315 u8 rc = 0;
a2fbb9ea 1316
2145a920
VZ
1317 if (!BP_NOMCP(bp)) {
1318 bnx2x_acquire_phy_lock(bp);
1319 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1320 bnx2x_release_phy_lock(bp);
1321 } else
1322 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1323
c18487ee
YR
1324 return rc;
1325}
a2fbb9ea 1326
8a1c38d1 1327static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1328{
8a1c38d1
EG
1329 u32 r_param = bp->link_vars.line_speed / 8;
1330 u32 fair_periodic_timeout_usec;
1331 u32 t_fair;
34f80b04 1332
8a1c38d1
EG
1333 memset(&(bp->cmng.rs_vars), 0,
1334 sizeof(struct rate_shaping_vars_per_port));
1335 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1336
8a1c38d1
EG
1337 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1338 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1339
8a1c38d1
EG
1340 /* this is the threshold below which no timer arming will occur
1341 1.25 coefficient is for the threshold to be a little bigger
1342 than the real time, to compensate for timer in-accuracy */
1343 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1344 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1345
8a1c38d1
EG
1346 /* resolution of fairness timer */
1347 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1348 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1349 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1350
8a1c38d1
EG
1351 /* this is the threshold below which we won't arm the timer anymore */
1352 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1353
8a1c38d1
EG
1354 /* we multiply by 1e3/8 to get bytes/msec.
1355 We don't want the credits to pass a credit
1356 of the t_fair*FAIR_MEM (algorithm resolution) */
1357 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1358 /* since each tick is 4 usec */
1359 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1360}
1361
2691d51d
EG
1362/* Calculates the sum of vn_min_rates.
1363 It's needed for further normalizing of the min_rates.
1364 Returns:
1365 sum of vn_min_rates.
1366 or
1367 0 - if all the min_rates are 0.
1368 In the later case fainess algorithm should be deactivated.
1369 If not all min_rates are zero then those that are zeroes will be set to 1.
1370 */
1371static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1372{
1373 int all_zero = 1;
1374 int port = BP_PORT(bp);
1375 int vn;
1376
1377 bp->vn_weight_sum = 0;
1378 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1379 int func = 2*vn + port;
1380 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1381 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1382 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1383
1384 /* Skip hidden vns */
1385 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1386 continue;
1387
1388 /* If min rate is zero - set it to 1 */
1389 if (!vn_min_rate)
1390 vn_min_rate = DEF_MIN_RATE;
1391 else
1392 all_zero = 0;
1393
1394 bp->vn_weight_sum += vn_min_rate;
1395 }
1396
1397 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1398 if (all_zero) {
1399 bp->cmng.flags.cmng_enables &=
1400 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1401 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1402 " fairness will be disabled\n");
1403 } else
1404 bp->cmng.flags.cmng_enables |=
1405 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1406}
1407
8a1c38d1 1408static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
1409{
1410 struct rate_shaping_vars_per_vn m_rs_vn;
1411 struct fairness_vars_per_vn m_fair_vn;
1412 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1413 u16 vn_min_rate, vn_max_rate;
1414 int i;
1415
1416 /* If function is hidden - set min and max to zeroes */
1417 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1418 vn_min_rate = 0;
1419 vn_max_rate = 0;
1420
1421 } else {
1422 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1423 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
1424 /* If min rate is zero - set it to 1 */
1425 if (!vn_min_rate)
34f80b04
EG
1426 vn_min_rate = DEF_MIN_RATE;
1427 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1428 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1429 }
8a1c38d1 1430 DP(NETIF_MSG_IFUP,
b015e3d1 1431 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1432 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1433
1434 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1435 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1436
1437 /* global vn counter - maximal Mbps for this vn */
1438 m_rs_vn.vn_counter.rate = vn_max_rate;
1439
1440 /* quota - number of bytes transmitted in this period */
1441 m_rs_vn.vn_counter.quota =
1442 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1443
8a1c38d1 1444 if (bp->vn_weight_sum) {
34f80b04
EG
1445 /* credit for each period of the fairness algorithm:
1446 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
1447 vn_weight_sum should not be larger than 10000, thus
1448 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1449 than zero */
34f80b04 1450 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1451 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1452 (8 * bp->vn_weight_sum))),
1453 (bp->cmng.fair_vars.fair_threshold * 2));
1454 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1455 m_fair_vn.vn_credit_delta);
1456 }
1457
34f80b04
EG
1458 /* Store it to internal memory */
1459 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1460 REG_WR(bp, BAR_XSTRORM_INTMEM +
1461 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1462 ((u32 *)(&m_rs_vn))[i]);
1463
1464 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1465 REG_WR(bp, BAR_XSTRORM_INTMEM +
1466 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1467 ((u32 *)(&m_fair_vn))[i]);
1468}
1469
8a1c38d1 1470
c18487ee
YR
1471/* This function is called upon link interrupt */
1472static void bnx2x_link_attn(struct bnx2x *bp)
1473{
d9e8b185 1474 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
1475 /* Make sure that we are synced with the current statistics */
1476 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1477
c18487ee 1478 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 1479
bb2a0f7a
YG
1480 if (bp->link_vars.link_up) {
1481
1c06328c 1482 /* dropless flow control */
a18f5128 1483 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
1484 int port = BP_PORT(bp);
1485 u32 pause_enabled = 0;
1486
1487 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1488 pause_enabled = 1;
1489
1490 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 1491 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
1492 pause_enabled);
1493 }
1494
bb2a0f7a
YG
1495 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1496 struct host_port_stats *pstats;
1497
1498 pstats = bnx2x_sp(bp, port_stats);
1499 /* reset old bmac stats */
1500 memset(&(pstats->mac_stx[0]), 0,
1501 sizeof(struct mac_stx));
1502 }
f34d28ea 1503 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
1504 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1505 }
1506
d9e8b185
VZ
1507 /* indicate link status only if link status actually changed */
1508 if (prev_link_status != bp->link_vars.link_status)
1509 bnx2x_link_report(bp);
34f80b04
EG
1510
1511 if (IS_E1HMF(bp)) {
8a1c38d1 1512 int port = BP_PORT(bp);
34f80b04 1513 int func;
8a1c38d1 1514 int vn;
34f80b04 1515
ab6ad5a4 1516 /* Set the attention towards other drivers on the same port */
34f80b04
EG
1517 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1518 if (vn == BP_E1HVN(bp))
1519 continue;
1520
8a1c38d1 1521 func = ((vn << 1) | port);
34f80b04
EG
1522 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1523 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1524 }
34f80b04 1525
8a1c38d1
EG
1526 if (bp->link_vars.link_up) {
1527 int i;
1528
1529 /* Init rate shaping and fairness contexts */
1530 bnx2x_init_port_minmax(bp);
34f80b04 1531
34f80b04 1532 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
1533 bnx2x_init_vn_minmax(bp, 2*vn + port);
1534
1535 /* Store it to internal memory */
1536 for (i = 0;
1537 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1538 REG_WR(bp, BAR_XSTRORM_INTMEM +
1539 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1540 ((u32 *)(&bp->cmng))[i]);
1541 }
34f80b04 1542 }
c18487ee 1543}
a2fbb9ea 1544
9f6c9258 1545void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 1546{
f34d28ea 1547 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 1548 return;
a2fbb9ea 1549
c18487ee 1550 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 1551
bb2a0f7a
YG
1552 if (bp->link_vars.link_up)
1553 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1554 else
1555 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1556
2691d51d
EG
1557 bnx2x_calc_vn_weight_sum(bp);
1558
c18487ee
YR
1559 /* indicate link status */
1560 bnx2x_link_report(bp);
a2fbb9ea 1561}
a2fbb9ea 1562
34f80b04
EG
1563static void bnx2x_pmf_update(struct bnx2x *bp)
1564{
1565 int port = BP_PORT(bp);
1566 u32 val;
1567
1568 bp->port.pmf = 1;
1569 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1570
1571 /* enable nig attention */
1572 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1573 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1574 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
1575
1576 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
1577}
1578
c18487ee 1579/* end of Link */
a2fbb9ea
ET
1580
1581/* slow path */
1582
1583/*
1584 * General service functions
1585 */
1586
2691d51d
EG
1587/* send the MCP a request, block until there is a reply */
1588u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1589{
1590 int func = BP_FUNC(bp);
1591 u32 seq = ++bp->fw_seq;
1592 u32 rc = 0;
1593 u32 cnt = 1;
1594 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1595
c4ff7cbf 1596 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
1597 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1598 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1599
1600 do {
1601 /* let the FW do it's magic ... */
1602 msleep(delay);
1603
1604 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1605
c4ff7cbf
EG
1606 /* Give the FW up to 5 second (500*10ms) */
1607 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
1608
1609 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1610 cnt*delay, rc, seq);
1611
1612 /* is this a reply to our command? */
1613 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1614 rc &= FW_MSG_CODE_MASK;
1615 else {
1616 /* FW BUG! */
1617 BNX2X_ERR("FW failed to respond!\n");
1618 bnx2x_fw_dump(bp);
1619 rc = 0;
1620 }
c4ff7cbf 1621 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
1622
1623 return rc;
1624}
1625
2691d51d
EG
1626static void bnx2x_e1h_disable(struct bnx2x *bp)
1627{
1628 int port = BP_PORT(bp);
2691d51d
EG
1629
1630 netif_tx_disable(bp->dev);
2691d51d
EG
1631
1632 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1633
2691d51d
EG
1634 netif_carrier_off(bp->dev);
1635}
1636
1637static void bnx2x_e1h_enable(struct bnx2x *bp)
1638{
1639 int port = BP_PORT(bp);
1640
1641 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1642
2691d51d
EG
1643 /* Tx queue should be only reenabled */
1644 netif_tx_wake_all_queues(bp->dev);
1645
061bc702
EG
1646 /*
1647 * Should not call netif_carrier_on since it will be called if the link
1648 * is up when checking for link state
1649 */
2691d51d
EG
1650}
1651
1652static void bnx2x_update_min_max(struct bnx2x *bp)
1653{
1654 int port = BP_PORT(bp);
1655 int vn, i;
1656
1657 /* Init rate shaping and fairness contexts */
1658 bnx2x_init_port_minmax(bp);
1659
1660 bnx2x_calc_vn_weight_sum(bp);
1661
1662 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1663 bnx2x_init_vn_minmax(bp, 2*vn + port);
1664
1665 if (bp->port.pmf) {
1666 int func;
1667
1668 /* Set the attention towards other drivers on the same port */
1669 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1670 if (vn == BP_E1HVN(bp))
1671 continue;
1672
1673 func = ((vn << 1) | port);
1674 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1675 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1676 }
1677
1678 /* Store it to internal memory */
1679 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1680 REG_WR(bp, BAR_XSTRORM_INTMEM +
1681 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1682 ((u32 *)(&bp->cmng))[i]);
1683 }
1684}
1685
1686static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1687{
2691d51d 1688 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
1689
1690 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1691
f34d28ea
EG
1692 /*
1693 * This is the only place besides the function initialization
1694 * where the bp->flags can change so it is done without any
1695 * locks
1696 */
2691d51d
EG
1697 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1698 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 1699 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
1700
1701 bnx2x_e1h_disable(bp);
1702 } else {
1703 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 1704 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
1705
1706 bnx2x_e1h_enable(bp);
1707 }
1708 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1709 }
1710 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1711
1712 bnx2x_update_min_max(bp);
1713 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1714 }
1715
1716 /* Report results to MCP */
1717 if (dcc_event)
1718 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1719 else
1720 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1721}
1722
28912902
MC
1723/* must be called under the spq lock */
1724static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1725{
1726 struct eth_spe *next_spe = bp->spq_prod_bd;
1727
1728 if (bp->spq_prod_bd == bp->spq_last_bd) {
1729 bp->spq_prod_bd = bp->spq;
1730 bp->spq_prod_idx = 0;
1731 DP(NETIF_MSG_TIMER, "end of spq\n");
1732 } else {
1733 bp->spq_prod_bd++;
1734 bp->spq_prod_idx++;
1735 }
1736 return next_spe;
1737}
1738
1739/* must be called under the spq lock */
1740static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1741{
1742 int func = BP_FUNC(bp);
1743
1744 /* Make sure that BD data is updated before writing the producer */
1745 wmb();
1746
1747 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1748 bp->spq_prod_idx);
1749 mmiowb();
1750}
1751
a2fbb9ea 1752/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 1753int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
a2fbb9ea
ET
1754 u32 data_hi, u32 data_lo, int common)
1755{
28912902 1756 struct eth_spe *spe;
a2fbb9ea 1757
a2fbb9ea
ET
1758#ifdef BNX2X_STOP_ON_ERROR
1759 if (unlikely(bp->panic))
1760 return -EIO;
1761#endif
1762
34f80b04 1763 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
1764
1765 if (!bp->spq_left) {
1766 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 1767 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
1768 bnx2x_panic();
1769 return -EBUSY;
1770 }
f1410647 1771
28912902
MC
1772 spe = bnx2x_sp_get_next(bp);
1773
a2fbb9ea 1774 /* CID needs port number to be encoded int it */
28912902 1775 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
1776 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1777 HW_CID(bp, cid));
28912902 1778 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 1779 if (common)
28912902 1780 spe->hdr.type |=
a2fbb9ea
ET
1781 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1782
28912902
MC
1783 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1784 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
1785
1786 bp->spq_left--;
1787
cdaa7cb8
VZ
1788 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1789 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1790 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1791 (u32)(U64_LO(bp->spq_mapping) +
1792 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1793 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1794
28912902 1795 bnx2x_sp_prod_update(bp);
34f80b04 1796 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
1797 return 0;
1798}
1799
1800/* acquire split MCP access lock register */
4a37fb66 1801static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 1802{
72fd0718 1803 u32 j, val;
34f80b04 1804 int rc = 0;
a2fbb9ea
ET
1805
1806 might_sleep();
72fd0718 1807 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
1808 val = (1UL << 31);
1809 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1810 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1811 if (val & (1L << 31))
1812 break;
1813
1814 msleep(5);
1815 }
a2fbb9ea 1816 if (!(val & (1L << 31))) {
19680c48 1817 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
1818 rc = -EBUSY;
1819 }
1820
1821 return rc;
1822}
1823
4a37fb66
YG
1824/* release split MCP access lock register */
1825static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 1826{
72fd0718 1827 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
1828}
1829
1830static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1831{
1832 struct host_def_status_block *def_sb = bp->def_status_blk;
1833 u16 rc = 0;
1834
1835 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
1836 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1837 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1838 rc |= 1;
1839 }
1840 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1841 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1842 rc |= 2;
1843 }
1844 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1845 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1846 rc |= 4;
1847 }
1848 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1849 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1850 rc |= 8;
1851 }
1852 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1853 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1854 rc |= 16;
1855 }
1856 return rc;
1857}
1858
1859/*
1860 * slow path service functions
1861 */
1862
1863static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1864{
34f80b04 1865 int port = BP_PORT(bp);
5c862848
EG
1866 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1867 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
1868 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1869 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
1870 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1871 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 1872 u32 aeu_mask;
87942b46 1873 u32 nig_mask = 0;
a2fbb9ea 1874
a2fbb9ea
ET
1875 if (bp->attn_state & asserted)
1876 BNX2X_ERR("IGU ERROR\n");
1877
3fcaf2e5
EG
1878 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1879 aeu_mask = REG_RD(bp, aeu_addr);
1880
a2fbb9ea 1881 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 1882 aeu_mask, asserted);
72fd0718 1883 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 1884 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 1885
3fcaf2e5
EG
1886 REG_WR(bp, aeu_addr, aeu_mask);
1887 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 1888
3fcaf2e5 1889 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 1890 bp->attn_state |= asserted;
3fcaf2e5 1891 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
1892
1893 if (asserted & ATTN_HARD_WIRED_MASK) {
1894 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 1895
a5e9a7cf
EG
1896 bnx2x_acquire_phy_lock(bp);
1897
877e9aa4 1898 /* save nig interrupt mask */
87942b46 1899 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 1900 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 1901
c18487ee 1902 bnx2x_link_attn(bp);
a2fbb9ea
ET
1903
1904 /* handle unicore attn? */
1905 }
1906 if (asserted & ATTN_SW_TIMER_4_FUNC)
1907 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1908
1909 if (asserted & GPIO_2_FUNC)
1910 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1911
1912 if (asserted & GPIO_3_FUNC)
1913 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1914
1915 if (asserted & GPIO_4_FUNC)
1916 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1917
1918 if (port == 0) {
1919 if (asserted & ATTN_GENERAL_ATTN_1) {
1920 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1921 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1922 }
1923 if (asserted & ATTN_GENERAL_ATTN_2) {
1924 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1925 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1926 }
1927 if (asserted & ATTN_GENERAL_ATTN_3) {
1928 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1929 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1930 }
1931 } else {
1932 if (asserted & ATTN_GENERAL_ATTN_4) {
1933 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1934 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1935 }
1936 if (asserted & ATTN_GENERAL_ATTN_5) {
1937 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1938 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1939 }
1940 if (asserted & ATTN_GENERAL_ATTN_6) {
1941 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1942 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1943 }
1944 }
1945
1946 } /* if hardwired */
1947
5c862848
EG
1948 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1949 asserted, hc_addr);
1950 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
1951
1952 /* now set back the mask */
a5e9a7cf 1953 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 1954 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
1955 bnx2x_release_phy_lock(bp);
1956 }
a2fbb9ea
ET
1957}
1958
fd4ef40d
EG
1959static inline void bnx2x_fan_failure(struct bnx2x *bp)
1960{
1961 int port = BP_PORT(bp);
1962
1963 /* mark the failure */
1964 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1965 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1966 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1967 bp->link_params.ext_phy_config);
1968
1969 /* log the failure */
cdaa7cb8
VZ
1970 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1971 " the driver to shutdown the card to prevent permanent"
1972 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 1973}
ab6ad5a4 1974
877e9aa4 1975static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 1976{
34f80b04 1977 int port = BP_PORT(bp);
877e9aa4 1978 int reg_offset;
4d295db0 1979 u32 val, swap_val, swap_override;
877e9aa4 1980
34f80b04
EG
1981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 1983
34f80b04 1984 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
1985
1986 val = REG_RD(bp, reg_offset);
1987 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1988 REG_WR(bp, reg_offset, val);
1989
1990 BNX2X_ERR("SPIO5 hw attention\n");
1991
fd4ef40d 1992 /* Fan failure attention */
35b19ba5
EG
1993 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 1995 /* Low power mode is controlled by GPIO 2 */
877e9aa4 1996 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 1997 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
1998 /* The PHY reset is controlled by GPIO 1 */
1999 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2000 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2001 break;
2002
4d295db0
EG
2003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2004 /* The PHY reset is controlled by GPIO 1 */
2005 /* fake the port number to cancel the swap done in
2006 set_gpio() */
2007 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2008 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2009 port = (swap_val && swap_override) ^ 1;
2010 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2011 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2012 break;
2013
877e9aa4
ET
2014 default:
2015 break;
2016 }
fd4ef40d 2017 bnx2x_fan_failure(bp);
877e9aa4 2018 }
34f80b04 2019
589abe3a
EG
2020 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2021 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2022 bnx2x_acquire_phy_lock(bp);
2023 bnx2x_handle_module_detect_int(&bp->link_params);
2024 bnx2x_release_phy_lock(bp);
2025 }
2026
34f80b04
EG
2027 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2028
2029 val = REG_RD(bp, reg_offset);
2030 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2031 REG_WR(bp, reg_offset, val);
2032
2033 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2034 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2035 bnx2x_panic();
2036 }
877e9aa4
ET
2037}
2038
2039static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2040{
2041 u32 val;
2042
0626b899 2043 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2044
2045 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2046 BNX2X_ERR("DB hw attention 0x%x\n", val);
2047 /* DORQ discard attention */
2048 if (val & 0x2)
2049 BNX2X_ERR("FATAL error from DORQ\n");
2050 }
34f80b04
EG
2051
2052 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2053
2054 int port = BP_PORT(bp);
2055 int reg_offset;
2056
2057 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2058 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2059
2060 val = REG_RD(bp, reg_offset);
2061 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2062 REG_WR(bp, reg_offset, val);
2063
2064 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2065 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2066 bnx2x_panic();
2067 }
877e9aa4
ET
2068}
2069
2070static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2071{
2072 u32 val;
2073
2074 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2075
2076 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2077 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2078 /* CFC error attention */
2079 if (val & 0x2)
2080 BNX2X_ERR("FATAL error from CFC\n");
2081 }
2082
2083 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2084
2085 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2086 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2087 /* RQ_USDMDP_FIFO_OVERFLOW */
2088 if (val & 0x18000)
2089 BNX2X_ERR("FATAL error from PXP\n");
2090 }
34f80b04
EG
2091
2092 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2093
2094 int port = BP_PORT(bp);
2095 int reg_offset;
2096
2097 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2098 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2099
2100 val = REG_RD(bp, reg_offset);
2101 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2102 REG_WR(bp, reg_offset, val);
2103
2104 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 2105 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
2106 bnx2x_panic();
2107 }
877e9aa4
ET
2108}
2109
2110static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2111{
34f80b04
EG
2112 u32 val;
2113
877e9aa4
ET
2114 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2115
34f80b04
EG
2116 if (attn & BNX2X_PMF_LINK_ASSERT) {
2117 int func = BP_FUNC(bp);
2118
2119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
2120 bp->mf_config = SHMEM_RD(bp,
2121 mf_cfg.func_mf_config[func].config);
2691d51d
EG
2122 val = SHMEM_RD(bp, func_mb[func].drv_status);
2123 if (val & DRV_STATUS_DCC_EVENT_MASK)
2124 bnx2x_dcc_event(bp,
2125 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 2126 bnx2x__link_status_update(bp);
2691d51d 2127 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
2128 bnx2x_pmf_update(bp);
2129
2130 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2131
2132 BNX2X_ERR("MC assert!\n");
2133 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2136 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2137 bnx2x_panic();
2138
2139 } else if (attn & BNX2X_MCP_ASSERT) {
2140
2141 BNX2X_ERR("MCP assert!\n");
2142 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2143 bnx2x_fw_dump(bp);
877e9aa4
ET
2144
2145 } else
2146 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2147 }
2148
2149 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2150 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2151 if (attn & BNX2X_GRC_TIMEOUT) {
2152 val = CHIP_IS_E1H(bp) ?
2153 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2154 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2155 }
2156 if (attn & BNX2X_GRC_RSV) {
2157 val = CHIP_IS_E1H(bp) ?
2158 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2159 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2160 }
877e9aa4 2161 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2162 }
2163}
2164
/* MISC_REG_GENERIC_POR_1 is split in two: the low LOAD_COUNTER_BITS hold a
 * load counter (see bnx2x_inc/dec_load_cnt) and the bits above it form the
 * reset-done flag (see bnx2x_set_reset_done/in_progress). */
#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2171/*
2172 * should be run under rtnl lock
2173 */
2174static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2175{
2176 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2177 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2178 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2179 barrier();
2180 mmiowb();
2181}
2182
2183/*
2184 * should be run under rtnl lock
2185 */
2186static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2187{
2188 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2189 val |= (1 << 16);
2190 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2191 barrier();
2192 mmiowb();
2193}
2194
2195/*
2196 * should be run under rtnl lock
2197 */
9f6c9258 2198bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
2199{
2200 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2201 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2202 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2203}
2204
2205/*
2206 * should be run under rtnl lock
2207 */
9f6c9258 2208inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2209{
2210 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2211
2212 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2213
2214 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2215 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2216 barrier();
2217 mmiowb();
2218}
2219
2220/*
2221 * should be run under rtnl lock
2222 */
9f6c9258 2223u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2224{
2225 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2226
2227 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2228
2229 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2230 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2231 barrier();
2232 mmiowb();
2233
2234 return val1;
2235}
2236
2237/*
2238 * should be run under rtnl lock
2239 */
2240static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2241{
2242 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2243}
2244
2245static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2246{
2247 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2248 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2249}
2250
/* Continue the current log line with a block name, comma-separating all
 * names after the first. */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx != 0)
		pr_cont(", ");
	pr_cont("%s", blk);
}
2257
2258static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2259{
2260 int i = 0;
2261 u32 cur_bit = 0;
2262 for (i = 0; sig; i++) {
2263 cur_bit = ((u32)0x1 << i);
2264 if (sig & cur_bit) {
2265 switch (cur_bit) {
2266 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2267 _print_next_block(par_num++, "BRB");
2268 break;
2269 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2270 _print_next_block(par_num++, "PARSER");
2271 break;
2272 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2273 _print_next_block(par_num++, "TSDM");
2274 break;
2275 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2276 _print_next_block(par_num++, "SEARCHER");
2277 break;
2278 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2279 _print_next_block(par_num++, "TSEMI");
2280 break;
2281 }
2282
2283 /* Clear the bit */
2284 sig &= ~cur_bit;
2285 }
2286 }
2287
2288 return par_num;
2289}
2290
2291static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2292{
2293 int i = 0;
2294 u32 cur_bit = 0;
2295 for (i = 0; sig; i++) {
2296 cur_bit = ((u32)0x1 << i);
2297 if (sig & cur_bit) {
2298 switch (cur_bit) {
2299 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2300 _print_next_block(par_num++, "PBCLIENT");
2301 break;
2302 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2303 _print_next_block(par_num++, "QM");
2304 break;
2305 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2306 _print_next_block(par_num++, "XSDM");
2307 break;
2308 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2309 _print_next_block(par_num++, "XSEMI");
2310 break;
2311 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2312 _print_next_block(par_num++, "DOORBELLQ");
2313 break;
2314 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2315 _print_next_block(par_num++, "VAUX PCI CORE");
2316 break;
2317 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2318 _print_next_block(par_num++, "DEBUG");
2319 break;
2320 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2321 _print_next_block(par_num++, "USDM");
2322 break;
2323 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2324 _print_next_block(par_num++, "USEMI");
2325 break;
2326 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2327 _print_next_block(par_num++, "UPB");
2328 break;
2329 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2330 _print_next_block(par_num++, "CSDM");
2331 break;
2332 }
2333
2334 /* Clear the bit */
2335 sig &= ~cur_bit;
2336 }
2337 }
2338
2339 return par_num;
2340}
2341
2342static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2343{
2344 int i = 0;
2345 u32 cur_bit = 0;
2346 for (i = 0; sig; i++) {
2347 cur_bit = ((u32)0x1 << i);
2348 if (sig & cur_bit) {
2349 switch (cur_bit) {
2350 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2351 _print_next_block(par_num++, "CSEMI");
2352 break;
2353 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2354 _print_next_block(par_num++, "PXP");
2355 break;
2356 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2357 _print_next_block(par_num++,
2358 "PXPPCICLOCKCLIENT");
2359 break;
2360 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2361 _print_next_block(par_num++, "CFC");
2362 break;
2363 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2364 _print_next_block(par_num++, "CDU");
2365 break;
2366 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2367 _print_next_block(par_num++, "IGU");
2368 break;
2369 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2370 _print_next_block(par_num++, "MISC");
2371 break;
2372 }
2373
2374 /* Clear the bit */
2375 sig &= ~cur_bit;
2376 }
2377 }
2378
2379 return par_num;
2380}
2381
2382static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2383{
2384 int i = 0;
2385 u32 cur_bit = 0;
2386 for (i = 0; sig; i++) {
2387 cur_bit = ((u32)0x1 << i);
2388 if (sig & cur_bit) {
2389 switch (cur_bit) {
2390 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2391 _print_next_block(par_num++, "MCP ROM");
2392 break;
2393 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2394 _print_next_block(par_num++, "MCP UMP RX");
2395 break;
2396 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2397 _print_next_block(par_num++, "MCP UMP TX");
2398 break;
2399 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2400 _print_next_block(par_num++, "MCP SCPAD");
2401 break;
2402 }
2403
2404 /* Clear the bit */
2405 sig &= ~cur_bit;
2406 }
2407 }
2408
2409 return par_num;
2410}
2411
2412static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2413 u32 sig2, u32 sig3)
2414{
2415 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2416 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2417 int par_num = 0;
2418 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2419 "[0]:0x%08x [1]:0x%08x "
2420 "[2]:0x%08x [3]:0x%08x\n",
2421 sig0 & HW_PRTY_ASSERT_SET_0,
2422 sig1 & HW_PRTY_ASSERT_SET_1,
2423 sig2 & HW_PRTY_ASSERT_SET_2,
2424 sig3 & HW_PRTY_ASSERT_SET_3);
2425 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2426 bp->dev->name);
2427 par_num = bnx2x_print_blocks_with_parity0(
2428 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2429 par_num = bnx2x_print_blocks_with_parity1(
2430 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2431 par_num = bnx2x_print_blocks_with_parity2(
2432 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2433 par_num = bnx2x_print_blocks_with_parity3(
2434 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2435 printk("\n");
2436 return true;
2437 } else
2438 return false;
2439}
2440
9f6c9258 2441bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 2442{
a2fbb9ea 2443 struct attn_route attn;
72fd0718
VZ
2444 int port = BP_PORT(bp);
2445
2446 attn.sig[0] = REG_RD(bp,
2447 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2448 port*4);
2449 attn.sig[1] = REG_RD(bp,
2450 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2451 port*4);
2452 attn.sig[2] = REG_RD(bp,
2453 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2454 port*4);
2455 attn.sig[3] = REG_RD(bp,
2456 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2457 port*4);
2458
2459 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2460 attn.sig[3]);
2461}
2462
2463static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2464{
2465 struct attn_route attn, *group_mask;
34f80b04 2466 int port = BP_PORT(bp);
877e9aa4 2467 int index;
a2fbb9ea
ET
2468 u32 reg_addr;
2469 u32 val;
3fcaf2e5 2470 u32 aeu_mask;
a2fbb9ea
ET
2471
2472 /* need to take HW lock because MCP or other port might also
2473 try to handle this event */
4a37fb66 2474 bnx2x_acquire_alr(bp);
a2fbb9ea 2475
72fd0718
VZ
2476 if (bnx2x_chk_parity_attn(bp)) {
2477 bp->recovery_state = BNX2X_RECOVERY_INIT;
2478 bnx2x_set_reset_in_progress(bp);
2479 schedule_delayed_work(&bp->reset_task, 0);
2480 /* Disable HW interrupts */
2481 bnx2x_int_disable(bp);
2482 bnx2x_release_alr(bp);
2483 /* In case of parity errors don't handle attentions so that
2484 * other function would "see" parity errors.
2485 */
2486 return;
2487 }
2488
a2fbb9ea
ET
2489 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2490 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2491 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2492 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2493 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2494 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2495
2496 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2497 if (deasserted & (1 << index)) {
72fd0718 2498 group_mask = &bp->attn_group[index];
a2fbb9ea 2499
34f80b04 2500 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
72fd0718
VZ
2501 index, group_mask->sig[0], group_mask->sig[1],
2502 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 2503
877e9aa4 2504 bnx2x_attn_int_deasserted3(bp,
72fd0718 2505 attn.sig[3] & group_mask->sig[3]);
877e9aa4 2506 bnx2x_attn_int_deasserted1(bp,
72fd0718 2507 attn.sig[1] & group_mask->sig[1]);
877e9aa4 2508 bnx2x_attn_int_deasserted2(bp,
72fd0718 2509 attn.sig[2] & group_mask->sig[2]);
877e9aa4 2510 bnx2x_attn_int_deasserted0(bp,
72fd0718 2511 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
2512 }
2513 }
2514
4a37fb66 2515 bnx2x_release_alr(bp);
a2fbb9ea 2516
5c862848 2517 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2518
2519 val = ~deasserted;
3fcaf2e5
EG
2520 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2521 val, reg_addr);
5c862848 2522 REG_WR(bp, reg_addr, val);
a2fbb9ea 2523
a2fbb9ea 2524 if (~bp->attn_state & deasserted)
3fcaf2e5 2525 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2526
2527 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2528 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2529
3fcaf2e5
EG
2530 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2531 aeu_mask = REG_RD(bp, reg_addr);
2532
2533 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2534 aeu_mask, deasserted);
72fd0718 2535 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 2536 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2537
3fcaf2e5
EG
2538 REG_WR(bp, reg_addr, aeu_mask);
2539 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2540
2541 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2542 bp->attn_state &= ~deasserted;
2543 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2544}
2545
2546static void bnx2x_attn_int(struct bnx2x *bp)
2547{
2548 /* read local copy of bits */
68d59484
EG
2549 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2550 attn_bits);
2551 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2552 attn_bits_ack);
a2fbb9ea
ET
2553 u32 attn_state = bp->attn_state;
2554
2555 /* look for changed bits */
2556 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2557 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2558
2559 DP(NETIF_MSG_HW,
2560 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2561 attn_bits, attn_ack, asserted, deasserted);
2562
2563 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2564 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2565
2566 /* handle bits that were raised */
2567 if (asserted)
2568 bnx2x_attn_int_asserted(bp, asserted);
2569
2570 if (deasserted)
2571 bnx2x_attn_int_deasserted(bp, deasserted);
2572}
2573
2574static void bnx2x_sp_task(struct work_struct *work)
2575{
1cf167f2 2576 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2577 u16 status;
2578
2579 /* Return here if interrupt is disabled */
2580 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2581 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2582 return;
2583 }
2584
2585 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2586/* if (status == 0) */
2587/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2588
cdaa7cb8 2589 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 2590
877e9aa4 2591 /* HW attentions */
cdaa7cb8 2592 if (status & 0x1) {
a2fbb9ea 2593 bnx2x_attn_int(bp);
cdaa7cb8
VZ
2594 status &= ~0x1;
2595 }
2596
2597 /* CStorm events: STAT_QUERY */
2598 if (status & 0x2) {
2599 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2600 status &= ~0x2;
2601 }
2602
2603 if (unlikely(status))
2604 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2605 status);
a2fbb9ea 2606
68d59484 2607 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2608 IGU_INT_NOP, 1);
2609 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2610 IGU_INT_NOP, 1);
2611 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2612 IGU_INT_NOP, 1);
2613 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2614 IGU_INT_NOP, 1);
2615 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2616 IGU_INT_ENABLE, 1);
2617}
2618
9f6c9258 2619irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
2620{
2621 struct net_device *dev = dev_instance;
2622 struct bnx2x *bp = netdev_priv(dev);
2623
2624 /* Return here if interrupt is disabled */
2625 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2626 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2627 return IRQ_HANDLED;
2628 }
2629
8d9c5f34 2630 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2631
2632#ifdef BNX2X_STOP_ON_ERROR
2633 if (unlikely(bp->panic))
2634 return IRQ_HANDLED;
2635#endif
2636
993ac7b5
MC
2637#ifdef BCM_CNIC
2638 {
2639 struct cnic_ops *c_ops;
2640
2641 rcu_read_lock();
2642 c_ops = rcu_dereference(bp->cnic_ops);
2643 if (c_ops)
2644 c_ops->cnic_handler(bp->cnic_data, NULL);
2645 rcu_read_unlock();
2646 }
2647#endif
1cf167f2 2648 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2649
2650 return IRQ_HANDLED;
2651}
2652
2653/* end of slow path */
2654
a2fbb9ea
ET
2655static void bnx2x_timer(unsigned long data)
2656{
2657 struct bnx2x *bp = (struct bnx2x *) data;
2658
2659 if (!netif_running(bp->dev))
2660 return;
2661
2662 if (atomic_read(&bp->intr_sem) != 0)
f1410647 2663 goto timer_restart;
a2fbb9ea
ET
2664
2665 if (poll) {
2666 struct bnx2x_fastpath *fp = &bp->fp[0];
2667 int rc;
2668
7961f791 2669 bnx2x_tx_int(fp);
a2fbb9ea
ET
2670 rc = bnx2x_rx_int(fp, 1000);
2671 }
2672
34f80b04
EG
2673 if (!BP_NOMCP(bp)) {
2674 int func = BP_FUNC(bp);
a2fbb9ea
ET
2675 u32 drv_pulse;
2676 u32 mcp_pulse;
2677
2678 ++bp->fw_drv_pulse_wr_seq;
2679 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2680 /* TBD - add SYSTEM_TIME */
2681 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 2682 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 2683
34f80b04 2684 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
2685 MCP_PULSE_SEQ_MASK);
2686 /* The delta between driver pulse and mcp response
2687 * should be 1 (before mcp response) or 0 (after mcp response)
2688 */
2689 if ((drv_pulse != mcp_pulse) &&
2690 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2691 /* someone lost a heartbeat... */
2692 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2693 drv_pulse, mcp_pulse);
2694 }
2695 }
2696
f34d28ea 2697 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 2698 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 2699
f1410647 2700timer_restart:
a2fbb9ea
ET
2701 mod_timer(&bp->timer, jiffies + bp->current_interval);
2702}
2703
2704/* end of Statistics */
2705
2706/* nic init */
2707
2708/*
2709 * nic init service functions
2710 */
2711
34f80b04 2712static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 2713{
34f80b04
EG
2714 int port = BP_PORT(bp);
2715
ca00392c
EG
2716 /* "CSTORM" */
2717 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2718 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2719 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2720 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2721 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2722 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
2723}
2724
9f6c9258 2725void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5c862848 2726 dma_addr_t mapping, int sb_id)
34f80b04
EG
2727{
2728 int port = BP_PORT(bp);
bb2a0f7a 2729 int func = BP_FUNC(bp);
a2fbb9ea 2730 int index;
34f80b04 2731 u64 section;
a2fbb9ea
ET
2732
2733 /* USTORM */
2734 section = ((u64)mapping) + offsetof(struct host_status_block,
2735 u_status_block);
34f80b04 2736 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 2737
ca00392c
EG
2738 REG_WR(bp, BAR_CSTRORM_INTMEM +
2739 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2740 REG_WR(bp, BAR_CSTRORM_INTMEM +
2741 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 2742 U64_HI(section));
ca00392c
EG
2743 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2744 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
2745
2746 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
2747 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2748 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
2749
2750 /* CSTORM */
2751 section = ((u64)mapping) + offsetof(struct host_status_block,
2752 c_status_block);
34f80b04 2753 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2754
2755 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2756 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 2757 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2758 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 2759 U64_HI(section));
7a9b2557 2760 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 2761 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
2762
2763 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2764 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 2765 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
2766
2767 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2768}
2769
2770static void bnx2x_zero_def_sb(struct bnx2x *bp)
2771{
2772 int func = BP_FUNC(bp);
a2fbb9ea 2773
ca00392c 2774 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
2775 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2776 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
2777 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2778 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2779 sizeof(struct cstorm_def_status_block_u)/4);
2780 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2781 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2782 sizeof(struct cstorm_def_status_block_c)/4);
2783 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
2784 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2785 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
2786}
2787
2788static void bnx2x_init_def_sb(struct bnx2x *bp,
2789 struct host_def_status_block *def_sb,
34f80b04 2790 dma_addr_t mapping, int sb_id)
a2fbb9ea 2791{
34f80b04
EG
2792 int port = BP_PORT(bp);
2793 int func = BP_FUNC(bp);
a2fbb9ea
ET
2794 int index, val, reg_offset;
2795 u64 section;
2796
2797 /* ATTN */
2798 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2799 atten_status_block);
34f80b04 2800 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 2801
49d66772
ET
2802 bp->attn_state = 0;
2803
a2fbb9ea
ET
2804 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2805 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2806
34f80b04 2807 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
2808 bp->attn_group[index].sig[0] = REG_RD(bp,
2809 reg_offset + 0x10*index);
2810 bp->attn_group[index].sig[1] = REG_RD(bp,
2811 reg_offset + 0x4 + 0x10*index);
2812 bp->attn_group[index].sig[2] = REG_RD(bp,
2813 reg_offset + 0x8 + 0x10*index);
2814 bp->attn_group[index].sig[3] = REG_RD(bp,
2815 reg_offset + 0xc + 0x10*index);
2816 }
2817
a2fbb9ea
ET
2818 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2819 HC_REG_ATTN_MSG0_ADDR_L);
2820
2821 REG_WR(bp, reg_offset, U64_LO(section));
2822 REG_WR(bp, reg_offset + 4, U64_HI(section));
2823
2824 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2825
2826 val = REG_RD(bp, reg_offset);
34f80b04 2827 val |= sb_id;
a2fbb9ea
ET
2828 REG_WR(bp, reg_offset, val);
2829
2830 /* USTORM */
2831 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2832 u_def_status_block);
34f80b04 2833 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 2834
ca00392c
EG
2835 REG_WR(bp, BAR_CSTRORM_INTMEM +
2836 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2837 REG_WR(bp, BAR_CSTRORM_INTMEM +
2838 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 2839 U64_HI(section));
ca00392c
EG
2840 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2841 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
2842
2843 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
2844 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2845 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
2846
2847 /* CSTORM */
2848 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2849 c_def_status_block);
34f80b04 2850 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2851
2852 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2853 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 2854 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 2855 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 2856 U64_HI(section));
5c862848 2857 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 2858 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
2859
2860 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2861 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 2862 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
2863
2864 /* TSTORM */
2865 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2866 t_def_status_block);
34f80b04 2867 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2868
2869 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 2870 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2871 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 2872 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2873 U64_HI(section));
5c862848 2874 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 2875 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
2876
2877 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2878 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 2879 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
2880
2881 /* XSTORM */
2882 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2883 x_def_status_block);
34f80b04 2884 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
2885
2886 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 2887 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2888 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 2889 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2890 U64_HI(section));
5c862848 2891 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 2892 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
2893
2894 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2895 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 2896 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 2897
bb2a0f7a 2898 bp->stats_pending = 0;
66e855f3 2899 bp->set_mac_pending = 0;
bb2a0f7a 2900
34f80b04 2901 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
2902}
2903
9f6c9258 2904void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 2905{
34f80b04 2906 int port = BP_PORT(bp);
a2fbb9ea
ET
2907 int i;
2908
2909 for_each_queue(bp, i) {
34f80b04 2910 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
2911
2912 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
2913 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2914 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2915 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 2916 bp->rx_ticks/(4 * BNX2X_BTR));
ca00392c
EG
2917 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2918 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2919 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 2920 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
2921
2922 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2923 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
2924 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2925 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 2926 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 2927 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
2928 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2929 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 2930 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
a2fbb9ea
ET
2931 }
2932}
2933
/*
 * Initialize the slow-path (SPQ) ring state and publish its base address
 * to the XSTORM firmware.
 *
 * Resets the software producer bookkeeping (credits, producer index,
 * first/last BD pointers) and writes the DMA address of the SPQ page and
 * the initial producer index into XSEM fast memory so the firmware can
 * consume slow-path commands.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);

        spin_lock_init(&bp->spq_lock);

        /* full credit of pending slow-path commands */
        bp->spq_left = MAX_SPQ_PENDING;
        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
        bp->spq_prod_bd = bp->spq;
        bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

        /* publish SPQ page base (low/high dwords) to the firmware */
        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
               U64_LO(bp->spq_mapping));
        REG_WR(bp,
               XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
               U64_HI(bp->spq_mapping));

        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
               bp->spq_prod_idx);
}
2955
/*
 * Populate the per-connection ETH context for every fastpath queue.
 *
 * The context lives in the slow-path buffer (bnx2x_sp(bp, context[i].eth))
 * and is later loaded into the chip's context memory. The first pass fills
 * the Rx-side (USTORM) fields: status block binding, buffer sizes, BD and
 * SGE ring base addresses, and TPA aggregation limits. The second pass
 * fills the Tx-side (CSTORM/XSTORM) fields: Tx CQ index, status block id
 * and Tx BD ring base address.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
        int i;

        /* Rx */
        for_each_queue(bp, i) {
                struct eth_context *context = bnx2x_sp(bp, context[i].eth);
                struct bnx2x_fastpath *fp = &bp->fp[i];
                u8 cl_id = fp->cl_id;

                context->ustorm_st_context.common.sb_index_numbers =
                                                BNX2X_RX_SB_INDEX_NUM;
                context->ustorm_st_context.common.clientId = cl_id;
                context->ustorm_st_context.common.status_block_id = fp->sb_id;
                context->ustorm_st_context.common.flags =
                        (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
                context->ustorm_st_context.common.statistics_counter_id =
                                                cl_id;
                context->ustorm_st_context.common.mc_alignment_log_size =
                                                BNX2X_RX_ALIGN_SHIFT;
                context->ustorm_st_context.common.bd_buff_size =
                                                bp->rx_buf_size;
                context->ustorm_st_context.common.bd_page_base_hi =
                                                U64_HI(fp->rx_desc_mapping);
                context->ustorm_st_context.common.bd_page_base_lo =
                                                U64_LO(fp->rx_desc_mapping);
                if (!fp->disable_tpa) {
                        /* TPA enabled: also program the SGE ring used for
                         * placing aggregated payload */
                        context->ustorm_st_context.common.flags |=
                                USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
                        context->ustorm_st_context.common.sge_buff_size =
                                (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
                                           0xffff);
                        context->ustorm_st_context.common.sge_page_base_hi =
                                                U64_HI(fp->rx_sge_mapping);
                        context->ustorm_st_context.common.sge_page_base_lo =
                                                U64_LO(fp->rx_sge_mapping);

                        /* max SGEs per packet, rounded up to a multiple of
                         * PAGES_PER_SGE and converted to SGE units */
                        context->ustorm_st_context.common.max_sges_for_packet =
                                SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
                        context->ustorm_st_context.common.max_sges_for_packet =
                                ((context->ustorm_st_context.common.
                                  max_sges_for_packet + PAGES_PER_SGE - 1) &
                                 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
                }

                /* CDU validation values for the Ustorm/Xstorm aggregation
                 * contexts of this connection id */
                context->ustorm_ag_context.cdu_usage =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_UCM_AG,
                                               ETH_CONNECTION_TYPE);

                context->xstorm_ag_context.cdu_reserved =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_XCM_AG,
                                               ETH_CONNECTION_TYPE);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_context *context =
                        bnx2x_sp(bp, context[i].eth);

                context->cstorm_st_context.sb_index_number =
                                                C_SB_ETH_TX_CQ_INDEX;
                context->cstorm_st_context.status_block_id = fp->sb_id;

                context->xstorm_st_context.tx_bd_page_base_hi =
                                                U64_HI(fp->tx_desc_mapping);
                context->xstorm_st_context.tx_bd_page_base_lo =
                                                U64_LO(fp->tx_desc_mapping);
                context->xstorm_st_context.statistics_data = (fp->cl_id |
                                XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
        }
}
3031
3032static void bnx2x_init_ind_table(struct bnx2x *bp)
3033{
26c8fa4d 3034 int func = BP_FUNC(bp);
a2fbb9ea
ET
3035 int i;
3036
555f6c78 3037 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
3038 return;
3039
555f6c78
EG
3040 DP(NETIF_MSG_IFUP,
3041 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 3042 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 3043 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 3044 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 3045 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
3046}
3047
9f6c9258 3048void bnx2x_set_client_config(struct bnx2x *bp)
49d66772 3049{
49d66772 3050 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
3051 int port = BP_PORT(bp);
3052 int i;
49d66772 3053
e7799c5f 3054 tstorm_client.mtu = bp->dev->mtu;
49d66772 3055 tstorm_client.config_flags =
de832a55
EG
3056 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3057 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 3058#ifdef BCM_VLAN
0c6671b0 3059 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 3060 tstorm_client.config_flags |=
8d9c5f34 3061 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
3062 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3063 }
3064#endif
49d66772
ET
3065
3066 for_each_queue(bp, i) {
de832a55
EG
3067 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3068
49d66772 3069 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3070 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
3071 ((u32 *)&tstorm_client)[0]);
3072 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3073 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
3074 ((u32 *)&tstorm_client)[1]);
3075 }
3076
34f80b04
EG
3077 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3078 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
3079}
3080
9f6c9258 3081void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 3082{
a2fbb9ea 3083 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 3084 int mode = bp->rx_mode;
37b091ba 3085 int mask = bp->rx_mode_cl_mask;
34f80b04 3086 int func = BP_FUNC(bp);
581ce43d 3087 int port = BP_PORT(bp);
a2fbb9ea 3088 int i;
581ce43d
EG
3089 /* All but management unicast packets should pass to the host as well */
3090 u32 llh_mask =
3091 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3092 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3093 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3094 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 3095
3196a88a 3096 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
3097
3098 switch (mode) {
3099 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
3100 tstorm_mac_filter.ucast_drop_all = mask;
3101 tstorm_mac_filter.mcast_drop_all = mask;
3102 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 3103 break;
356e2385 3104
a2fbb9ea 3105 case BNX2X_RX_MODE_NORMAL:
34f80b04 3106 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 3107 break;
356e2385 3108
a2fbb9ea 3109 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
3110 tstorm_mac_filter.mcast_accept_all = mask;
3111 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 3112 break;
356e2385 3113
a2fbb9ea 3114 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
3115 tstorm_mac_filter.ucast_accept_all = mask;
3116 tstorm_mac_filter.mcast_accept_all = mask;
3117 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
3118 /* pass management unicast packets as well */
3119 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 3120 break;
356e2385 3121
a2fbb9ea 3122 default:
34f80b04
EG
3123 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3124 break;
a2fbb9ea
ET
3125 }
3126
581ce43d
EG
3127 REG_WR(bp,
3128 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3129 llh_mask);
3130
a2fbb9ea
ET
3131 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3132 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3133 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
3134 ((u32 *)&tstorm_mac_filter)[i]);
3135
34f80b04 3136/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
3137 ((u32 *)&tstorm_mac_filter)[i]); */
3138 }
a2fbb9ea 3139
49d66772
ET
3140 if (mode != BNX2X_RX_MODE_NONE)
3141 bnx2x_set_client_config(bp);
a2fbb9ea
ET
3142}
3143
471de716
EG
3144static void bnx2x_init_internal_common(struct bnx2x *bp)
3145{
3146 int i;
3147
3148 /* Zero this manually as its initialization is
3149 currently missing in the initTool */
3150 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3151 REG_WR(bp, BAR_USTRORM_INTMEM +
3152 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3153}
3154
/*
 * Per-port internal-memory init: program the base timer resolution (BTR)
 * for host coalescing into all four storm processors' internal memory.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);

        /* U and C default status block timers live in CSTORM memory */
        REG_WR(bp,
               BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
        REG_WR(bp,
               BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
3166
/*
 * Per-function internal-memory init. In order:
 *  - program the Tstorm function-common config (RSS, TPA, E1H VLAN-in-CAM);
 *  - force Rx mode to NONE until the link is up;
 *  - zero all per-client statistics areas in X/T/U storms;
 *  - enable ETH statistics collection and publish the DMA address of the
 *    host fw_stats buffer to each storm;
 *  - on E1H, publish the multi-function mode and outer VLAN tag;
 *  - program per-queue CQ ring page addresses and the TPA max aggregation
 *    size (FW limit: 8 frags);
 *  - on E1H, program dropless flow control thresholds per queue;
 *  - set up rate shaping / fairness (multi-function only) and, if this
 *    function is the PMF, store the cmng structures to internal memory.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
        struct tstorm_eth_function_common_config tstorm_config = {0};
        struct stats_indication_flags stats_flags = {0};
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int i, j;
        u32 offset;
        u16 max_agg_size;

        tstorm_config.config_flags = RSS_FLAGS(bp);

        if (is_multi(bp))
                tstorm_config.rss_result_mask = MULTI_MASK;

        /* Enable TPA if needed */
        if (bp->flags & TPA_ENABLE_FLAG)
                tstorm_config.config_flags |=
                        TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

        if (IS_E1HMF(bp))
                tstorm_config.config_flags |=
                                TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

        tstorm_config.leading_client_id = BP_L_ID(bp);

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
               (*(u32 *)&tstorm_config));

        bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
        bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
        bnx2x_set_storm_rx_mode(bp);

        for_each_queue(bp, i) {
                u8 cl_id = bp->fp[i].cl_id;

                /* reset xstorm per client statistics */
                offset = BAR_XSTRORM_INTMEM +
                         XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
                for (j = 0;
                     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
                        REG_WR(bp, offset + j*4, 0);

                /* reset tstorm per client statistics */
                offset = BAR_TSTRORM_INTMEM +
                         TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
                for (j = 0;
                     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
                        REG_WR(bp, offset + j*4, 0);

                /* reset ustorm per client statistics */
                offset = BAR_USTRORM_INTMEM +
                         USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
                for (j = 0;
                     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
                        REG_WR(bp, offset + j*4, 0);
        }

        /* Init statistics related context */
        stats_flags.collect_eth = 1;

        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
               ((u32 *)&stats_flags)[0]);
        REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
               ((u32 *)&stats_flags)[1]);

        /* publish the host fw_stats buffer address to X/T/U storms */
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
               U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
        REG_WR(bp, BAR_USTRORM_INTMEM +
               USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
               U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

        if (CHIP_IS_E1H(bp)) {
                /* tell each storm whether we run in multi-function mode */
                REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));
                REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));
                REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));
                REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
                        IS_E1HMF(bp));

                REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
                         bp->e1hov);
        }

        /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
        max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
                                   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
                       U64_LO(fp->rx_comp_mapping));
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
                       U64_HI(fp->rx_comp_mapping));

                /* Next page */
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
                       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
                       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

                REG_WR16(bp, BAR_USTRORM_INTMEM +
                         USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
                         max_agg_size);
        }

        /* dropless flow control */
        if (CHIP_IS_E1H(bp)) {
                struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

                rx_pause.bd_thr_low = 250;
                rx_pause.cqe_thr_low = 250;
                rx_pause.cos = 1;
                rx_pause.sge_thr_low = 0;
                rx_pause.bd_thr_high = 350;
                rx_pause.cqe_thr_high = 350;
                rx_pause.sge_thr_high = 0;

                for_each_queue(bp, i) {
                        struct bnx2x_fastpath *fp = &bp->fp[i];

                        /* SGE thresholds only matter when TPA is active */
                        if (!fp->disable_tpa) {
                                rx_pause.sge_thr_low = 150;
                                rx_pause.sge_thr_high = 250;
                        }


                        offset = BAR_USTRORM_INTMEM +
                                 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
                                                                   fp->cl_id);
                        for (j = 0;
                             j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
                             j++)
                                REG_WR(bp, offset + j*4,
                                       ((u32 *)&rx_pause)[j]);
                }
        }

        memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

        /* Init rate shaping and fairness contexts */
        if (IS_E1HMF(bp)) {
                int vn;

                /* During init there is no active link
                   Until link is up, set link rate to 10Gbps */
                bp->link_vars.line_speed = SPEED_10000;
                bnx2x_init_port_minmax(bp);

                if (!BP_NOMCP(bp))
                        bp->mf_config =
                              SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
                bnx2x_calc_vn_weight_sum(bp);

                for (vn = VN_0; vn < E1HVN_MAX; vn++)
                        bnx2x_init_vn_minmax(bp, 2*vn + port);

                /* Enable rate shaping and fairness */
                bp->cmng.flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        } else {
                /* rate shaping and fairness are disabled */
                DP(NETIF_MSG_IFUP,
                   "single function mode minmax will be disabled\n");
        }


        /* Store cmng structures to internal memory */
        if (bp->port.pmf)
                for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
                        REG_WR(bp, BAR_XSTRORM_INTMEM +
                               XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
                               ((u32 *)(&bp->cmng))[i]);
}
3379
/*
 * Dispatch internal-memory initialization based on the load response from
 * the MCP. The cases deliberately cascade: the first driver to load does
 * common + port + function init, the first on a port does port + function,
 * and every other function does function init only.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON:
                bnx2x_init_internal_common(bp);
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_PORT:
                bnx2x_init_internal_port(bp);
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                bnx2x_init_internal_func(bp);
                break;

        default:
                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
                break;
        }
}
3400
9f6c9258 3401void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
3402{
3403 int i;
3404
3405 for_each_queue(bp, i) {
3406 struct bnx2x_fastpath *fp = &bp->fp[i];
3407
34f80b04 3408 fp->bp = bp;
a2fbb9ea 3409 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 3410 fp->index = i;
34f80b04 3411 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
3412#ifdef BCM_CNIC
3413 fp->sb_id = fp->cl_id + 1;
3414#else
34f80b04 3415 fp->sb_id = fp->cl_id;
37b091ba 3416#endif
34f80b04 3417 DP(NETIF_MSG_IFUP,
f5372251
EG
3418 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3419 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 3420 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 3421 fp->sb_id);
5c862848 3422 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
3423 }
3424
16119785
EG
3425 /* ensure status block indices were read */
3426 rmb();
3427
3428
5c862848
EG
3429 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3430 DEF_SB_ID);
3431 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
3432 bnx2x_update_coalesce(bp);
3433 bnx2x_init_rx_rings(bp);
3434 bnx2x_init_tx_ring(bp);
3435 bnx2x_init_sp_ring(bp);
3436 bnx2x_init_context(bp);
471de716 3437 bnx2x_init_internal(bp, load_code);
a2fbb9ea 3438 bnx2x_init_ind_table(bp);
0ef00459
EG
3439 bnx2x_stats_init(bp);
3440
3441 /* At this point, we are ready for interrupts */
3442 atomic_set(&bp->intr_sem, 0);
3443
3444 /* flush all before enabling interrupts */
3445 mb();
3446 mmiowb();
3447
615f8fd9 3448 bnx2x_int_enable(bp);
eb8da205
EG
3449
3450 /* Check for SPIO5 */
3451 bnx2x_attn_int_deasserted0(bp,
3452 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3453 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
3454}
3455
3456/* end of nic init */
3457
3458/*
3459 * gzip service functions
3460 */
3461
/*
 * Allocate the resources needed to decompress firmware images:
 * a DMA-coherent output buffer, a zlib stream object and its inflate
 * workspace. On any failure the already-acquired resources are released
 * in reverse order via the goto-cleanup chain.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
        bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
                                            &bp->gunzip_mapping, GFP_KERNEL);
        if (bp->gunzip_buf == NULL)
                goto gunzip_nomem1;

        bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
        if (bp->strm == NULL)
                goto gunzip_nomem2;

        bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
                                      GFP_KERNEL);
        if (bp->strm->workspace == NULL)
                goto gunzip_nomem3;

        return 0;

gunzip_nomem3:
        kfree(bp->strm);
        bp->strm = NULL;

gunzip_nomem2:
        dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
                          bp->gunzip_mapping);
        bp->gunzip_buf = NULL;

gunzip_nomem1:
        netdev_err(bp->dev, "Cannot allocate firmware buffer for"
                   " un-compression\n");
        return -ENOMEM;
}
3494
3495static void bnx2x_gunzip_end(struct bnx2x *bp)
3496{
3497 kfree(bp->strm->workspace);
3498
3499 kfree(bp->strm);
3500 bp->strm = NULL;
3501
3502 if (bp->gunzip_buf) {
1a983142
FT
3503 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3504 bp->gunzip_mapping);
a2fbb9ea
ET
3505 bp->gunzip_buf = NULL;
3506 }
3507}
3508
94a78b79 3509static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
3510{
3511 int n, rc;
3512
3513 /* check gzip header */
94a78b79
VZ
3514 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3515 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 3516 return -EINVAL;
94a78b79 3517 }
a2fbb9ea
ET
3518
3519 n = 10;
3520
34f80b04 3521#define FNAME 0x8
a2fbb9ea
ET
3522
3523 if (zbuf[3] & FNAME)
3524 while ((zbuf[n++] != 0) && (n < len));
3525
94a78b79 3526 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
3527 bp->strm->avail_in = len - n;
3528 bp->strm->next_out = bp->gunzip_buf;
3529 bp->strm->avail_out = FW_BUF_SIZE;
3530
3531 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3532 if (rc != Z_OK)
3533 return rc;
3534
3535 rc = zlib_inflate(bp->strm, Z_FINISH);
3536 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
3537 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3538 bp->strm->msg);
a2fbb9ea
ET
3539
3540 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3541 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
3542 netdev_err(bp->dev, "Firmware decompression error:"
3543 " gunzip_outlen (%d) not aligned\n",
3544 bp->gunzip_outlen);
a2fbb9ea
ET
3545 bp->gunzip_outlen >>= 2;
3546
3547 zlib_inflateEnd(bp->strm);
3548
3549 if (rc == Z_STREAM_END)
3550 return 0;
3551
3552 return rc;
3553}
3554
3555/* nic load/unload */
3556
3557/*
34f80b04 3558 * General service functions
a2fbb9ea
ET
3559 */
3560
3561/* send a NIG loopback debug packet */
3562static void bnx2x_lb_pckt(struct bnx2x *bp)
3563{
a2fbb9ea 3564 u32 wb_write[3];
a2fbb9ea
ET
3565
3566 /* Ethernet source and destination addresses */
a2fbb9ea
ET
3567 wb_write[0] = 0x55555555;
3568 wb_write[1] = 0x55555555;
34f80b04 3569 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 3570 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3571
3572 /* NON-IP protocol */
a2fbb9ea
ET
3573 wb_write[0] = 0x09000000;
3574 wb_write[1] = 0x55555555;
34f80b04 3575 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 3576 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3577}
3578
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
/*
 * Two-part self test using NIG loopback packets:
 *  part 1: send one packet with CFC-search credits at 0 and verify it is
 *          seen by both the NIG statistics (16 bytes) and the parser;
 *  part 2: send 10 packets, restore one credit, and verify the parser
 *          drains and the NIG EOP FIFO empties.
 * BRB/PRS (and finally NIG) are reset around each part. Timeouts are
 * scaled by 'factor' for FPGA/emulation platforms.
 * Returns 0 on success, a negative stage code (-1..-4) on failure.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
        int factor;
        int count, i;
        u32 val = 0;

        /* emulated/FPGA parts are far slower — stretch the poll budget */
        if (CHIP_REV_IS_FPGA(bp))
                factor = 120;
        else if (CHIP_REV_IS_EMUL(bp))
                factor = 200;
        else
                factor = 1;

        DP(NETIF_MSG_HW, "start part1\n");

        /* Disable inputs of parser neighbor blocks */
        REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
        REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
        REG_WR(bp, CFC_REG_DEBUG0, 0x1);
        REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

        /* Write 0 to parser credits for CFC search request */
        REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

        /* send Ethernet packet */
        bnx2x_lb_pckt(bp);

        /* TODO do i reset NIG statistic? */
        /* Wait until NIG register shows 1 packet of size 0x10 */
        count = 1000 * factor;
        while (count) {

                bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
                val = *bnx2x_sp(bp, wb_data[0]);
                if (val == 0x10)
                        break;

                msleep(10);
                count--;
        }
        if (val != 0x10) {
                BNX2X_ERR("NIG timeout val = 0x%x\n", val);
                return -1;
        }

        /* Wait until PRS register shows 1 packet */
        count = 1000 * factor;
        while (count) {
                val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
                if (val == 1)
                        break;

                msleep(10);
                count--;
        }
        if (val != 0x1) {
                BNX2X_ERR("PRS timeout val = 0x%x\n", val);
                return -2;
        }

        /* Reset and init BRB, PRS */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
        msleep(50);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
        msleep(50);
        bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

        DP(NETIF_MSG_HW, "part2\n");

        /* Disable inputs of parser neighbor blocks */
        REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
        REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
        REG_WR(bp, CFC_REG_DEBUG0, 0x1);
        REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

        /* Write 0 to parser credits for CFC search request */
        REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

        /* send 10 Ethernet packets */
        for (i = 0; i < 10; i++)
                bnx2x_lb_pckt(bp);

        /* Wait until NIG register shows 10 + 1
           packets of size 11*0x10 = 0xb0 */
        count = 1000 * factor;
        while (count) {

                bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
                val = *bnx2x_sp(bp, wb_data[0]);
                if (val == 0xb0)
                        break;

                msleep(10);
                count--;
        }
        if (val != 0xb0) {
                BNX2X_ERR("NIG timeout val = 0x%x\n", val);
                return -3;
        }

        /* Wait until PRS register shows 2 packets */
        val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
        if (val != 2)
                BNX2X_ERR("PRS timeout val = 0x%x\n", val);

        /* Write 1 to parser credits for CFC search request */
        REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

        /* Wait until PRS register shows 3 packets */
        msleep(10 * factor);
        /* Wait until NIG register shows 1 packet of size 0x10 */
        val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
        if (val != 3)
                BNX2X_ERR("PRS timeout val = 0x%x\n", val);

        /* clear NIG EOP FIFO */
        for (i = 0; i < 11; i++)
                REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
        val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
        if (val != 1) {
                BNX2X_ERR("clear of NIG failed\n");
                return -4;
        }

        /* Reset and init BRB, PRS, NIG */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
        msleep(50);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
        msleep(50);
        bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
        /* set NIC mode */
        REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

        /* Enable inputs of parser neighbor blocks */
        REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
        REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
        REG_WR(bp, CFC_REG_DEBUG0, 0x0);
        REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

        DP(NETIF_MSG_HW, "done\n");

        return 0; /* OK */
}
3730
/*
 * Unmask attention interrupts for the HW blocks the driver monitors.
 *
 * A mask value of 0 unmasks all attention bits for that block; the
 * commented-out SEM/MISC lines are deliberately left masked. PXP2 keeps
 * some bits masked (different set for FPGA), and PBF masks bits 3 and 4.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
        REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
        REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
        REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
        REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
        REG_WR(bp, QM_REG_QM_INT_MASK, 0);
        REG_WR(bp, TM_REG_TM_INT_MASK, 0);
        REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
        REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
        REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
        REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
        REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
        REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
        REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
        REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
        REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
        REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
        /* FPGA keeps an extra bit masked (0x5.. vs 0x4..) */
        if (CHIP_REV_IS_FPGA(bp))
                REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
        else
                REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
        REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
        REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
        REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
        REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
        REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
        REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
}
3769
/* Per-block parity attention mask table, consumed by
 * enable_blocks_parity(). A mask of 0x0 enables parity reporting for all
 * bits of the block; 0xffffffff leaves the block fully masked; other
 * values mask only the noted bits. */
static const struct {
        u32 addr;
        u32 mask;
} bnx2x_parity_mask[] = {
        {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
        {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
        {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
        {HC_REG_HC_PRTY_MASK, 0xffffffff},
        {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
        {QM_REG_QM_PRTY_MASK, 0x0},
        {DORQ_REG_DORQ_PRTY_MASK, 0x0},
        {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
        {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
        {SRC_REG_SRC_PRTY_MASK, 0x4},   /* bit 2 */
        {CDU_REG_CDU_PRTY_MASK, 0x0},
        {CFC_REG_CFC_PRTY_MASK, 0x0},
        {DBG_REG_DBG_PRTY_MASK, 0x0},
        {DMAE_REG_DMAE_PRTY_MASK, 0x0},
        {BRB1_REG_BRB1_PRTY_MASK, 0x0},
        {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
        {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
        {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
        {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
        {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
        {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
        {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
        {USEM_REG_USEM_PRTY_MASK_0, 0x0},
        {USEM_REG_USEM_PRTY_MASK_1, 0x0},
        {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
        {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
        {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
        {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
3803
3804static void enable_blocks_parity(struct bnx2x *bp)
3805{
3806 int i, mask_arr_len =
3807 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3808
3809 for (i = 0; i < mask_arr_len; i++)
3810 REG_WR(bp, bnx2x_parity_mask[i].addr,
3811 bnx2x_parity_mask[i].mask);
3812}
3813
34f80b04 3814
81f75bbf
EG
3815static void bnx2x_reset_common(struct bnx2x *bp)
3816{
3817 /* reset_common */
3818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3819 0xd3ffff7f);
3820 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3821}
3822
573f2035
EG
3823static void bnx2x_init_pxp(struct bnx2x *bp)
3824{
3825 u16 devctl;
3826 int r_order, w_order;
3827
3828 pci_read_config_word(bp->pdev,
3829 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3830 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3831 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3832 if (bp->mrrs == -1)
3833 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3834 else {
3835 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3836 r_order = bp->mrrs;
3837 }
3838
3839 bnx2x_init_pxp_arb(bp, r_order, w_order);
3840}
fd4ef40d
EG
3841
3842static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3843{
2145a920 3844 int is_required;
fd4ef40d 3845 u32 val;
2145a920 3846 int port;
fd4ef40d 3847
2145a920
VZ
3848 if (BP_NOMCP(bp))
3849 return;
3850
3851 is_required = 0;
fd4ef40d
EG
3852 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3853 SHARED_HW_CFG_FAN_FAILURE_MASK;
3854
3855 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3856 is_required = 1;
3857
3858 /*
3859 * The fan failure mechanism is usually related to the PHY type since
3860 * the power consumption of the board is affected by the PHY. Currently,
3861 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3862 */
3863 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3864 for (port = PORT_0; port < PORT_MAX; port++) {
3865 u32 phy_type =
3866 SHMEM_RD(bp, dev_info.port_hw_config[port].
3867 external_phy_config) &
3868 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869 is_required |=
3870 ((phy_type ==
3871 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
3872 (phy_type ==
3873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
3874 (phy_type ==
3875 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3876 }
3877
3878 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3879
3880 if (is_required == 0)
3881 return;
3882
3883 /* Fan failure is indicated by SPIO 5 */
3884 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3885 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3886
3887 /* set to active low mode */
3888 val = REG_RD(bp, MISC_REG_SPIO_INT);
3889 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 3890 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
3891 REG_WR(bp, MISC_REG_SPIO_INT, val);
3892
3893 /* enable interrupt to signal the IGU */
3894 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3895 val |= (1 << MISC_REGISTERS_SPIO_5);
3896 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3897}
3898
34f80b04 3899static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 3900{
a2fbb9ea 3901 u32 val, i;
37b091ba
MC
3902#ifdef BCM_CNIC
3903 u32 wb_write[2];
3904#endif
a2fbb9ea 3905
34f80b04 3906 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 3907
81f75bbf 3908 bnx2x_reset_common(bp);
34f80b04
EG
3909 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3910 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 3911
94a78b79 3912 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
3913 if (CHIP_IS_E1H(bp))
3914 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 3915
34f80b04
EG
3916 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3917 msleep(30);
3918 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 3919
94a78b79 3920 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
3921 if (CHIP_IS_E1(bp)) {
3922 /* enable HW interrupt from PXP on USDM overflow
3923 bit 16 on INT_MASK_0 */
3924 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3925 }
a2fbb9ea 3926
94a78b79 3927 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 3928 bnx2x_init_pxp(bp);
a2fbb9ea
ET
3929
3930#ifdef __BIG_ENDIAN
34f80b04
EG
3931 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3932 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3933 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3934 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3935 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
3936 /* make sure this value is 0 */
3937 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
3938
3939/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3940 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3941 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3942 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3943 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
3944#endif
3945
34f80b04 3946 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 3947#ifdef BCM_CNIC
34f80b04
EG
3948 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3949 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3950 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
3951#endif
3952
34f80b04
EG
3953 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3954 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 3955
34f80b04
EG
3956 /* let the HW do it's magic ... */
3957 msleep(100);
3958 /* finish PXP init */
3959 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3960 if (val != 1) {
3961 BNX2X_ERR("PXP2 CFG failed\n");
3962 return -EBUSY;
3963 }
3964 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3965 if (val != 1) {
3966 BNX2X_ERR("PXP2 RD_INIT failed\n");
3967 return -EBUSY;
3968 }
a2fbb9ea 3969
34f80b04
EG
3970 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3971 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 3972
94a78b79 3973 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 3974
34f80b04
EG
3975 /* clean the DMAE memory */
3976 bp->dmae_ready = 1;
3977 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 3978
94a78b79
VZ
3979 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3980 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3981 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3982 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 3983
34f80b04
EG
3984 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3985 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3986 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3987 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3988
94a78b79 3989 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
3990
3991#ifdef BCM_CNIC
3992 wb_write[0] = 0;
3993 wb_write[1] = 0;
3994 for (i = 0; i < 64; i++) {
3995 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3996 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3997
3998 if (CHIP_IS_E1H(bp)) {
3999 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4000 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4001 wb_write, 2);
4002 }
4003 }
4004#endif
34f80b04
EG
4005 /* soft reset pulse */
4006 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4007 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 4008
37b091ba 4009#ifdef BCM_CNIC
94a78b79 4010 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 4011#endif
a2fbb9ea 4012
94a78b79 4013 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
4014 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4015 if (!CHIP_REV_IS_SLOW(bp)) {
4016 /* enable hw interrupt from doorbell Q */
4017 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4018 }
a2fbb9ea 4019
94a78b79
VZ
4020 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4021 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 4022 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 4023#ifndef BCM_CNIC
3196a88a
EG
4024 /* set NIC mode */
4025 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 4026#endif
34f80b04
EG
4027 if (CHIP_IS_E1H(bp))
4028 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 4029
94a78b79
VZ
4030 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4031 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4032 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4033 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 4034
ca00392c
EG
4035 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4036 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 4039
94a78b79
VZ
4040 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4041 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4042 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4043 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 4044
34f80b04
EG
4045 /* sync semi rtc */
4046 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4047 0x80000000);
4048 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4049 0x80000000);
a2fbb9ea 4050
94a78b79
VZ
4051 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4052 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 4054
34f80b04 4055 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
4056 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4057 REG_WR(bp, i, random32());
94a78b79 4058 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
4059#ifdef BCM_CNIC
4060 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4061 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4062 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4063 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4064 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4065 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4066 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4067 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4068 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4069 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4070#endif
34f80b04 4071 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 4072
34f80b04
EG
4073 if (sizeof(union cdu_context) != 1024)
4074 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
4075 dev_alert(&bp->pdev->dev, "please adjust the size "
4076 "of cdu_context(%ld)\n",
7995c64e 4077 (long)sizeof(union cdu_context));
a2fbb9ea 4078
94a78b79 4079 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
4080 val = (4 << 24) + (0 << 12) + 1024;
4081 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 4082
94a78b79 4083 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 4084 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
4085 /* enable context validation interrupt from CFC */
4086 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4087
4088 /* set the thresholds to prevent CFC/CDU race */
4089 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 4090
94a78b79
VZ
4091 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4092 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 4093
94a78b79 4094 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
4095 /* Reset PCIE errors for debug */
4096 REG_WR(bp, 0x2814, 0xffffffff);
4097 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 4098
94a78b79 4099 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 4100 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 4101 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 4102 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 4103
94a78b79 4104 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
4105 if (CHIP_IS_E1H(bp)) {
4106 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4107 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4108 }
4109
4110 if (CHIP_REV_IS_SLOW(bp))
4111 msleep(200);
4112
4113 /* finish CFC init */
4114 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4115 if (val != 1) {
4116 BNX2X_ERR("CFC LL_INIT failed\n");
4117 return -EBUSY;
4118 }
4119 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4120 if (val != 1) {
4121 BNX2X_ERR("CFC AC_INIT failed\n");
4122 return -EBUSY;
4123 }
4124 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4125 if (val != 1) {
4126 BNX2X_ERR("CFC CAM_INIT failed\n");
4127 return -EBUSY;
4128 }
4129 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 4130
34f80b04
EG
4131 /* read NIG statistic
4132 to see if this is our first up since powerup */
4133 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4134 val = *bnx2x_sp(bp, wb_data[0]);
4135
4136 /* do internal memory self test */
4137 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4138 BNX2X_ERR("internal mem self test failed\n");
4139 return -EBUSY;
4140 }
4141
35b19ba5 4142 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
4143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 4146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
4147 bp->port.need_hw_lock = 1;
4148 break;
4149
34f80b04
EG
4150 default:
4151 break;
4152 }
f1410647 4153
fd4ef40d
EG
4154 bnx2x_setup_fan_failure_detection(bp);
4155
34f80b04
EG
4156 /* clear PXP2 attentions */
4157 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 4158
34f80b04 4159 enable_blocks_attention(bp);
72fd0718
VZ
4160 if (CHIP_PARITY_SUPPORTED(bp))
4161 enable_blocks_parity(bp);
a2fbb9ea 4162
6bbca910
YR
4163 if (!BP_NOMCP(bp)) {
4164 bnx2x_acquire_phy_lock(bp);
4165 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4166 bnx2x_release_phy_lock(bp);
4167 } else
4168 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4169
34f80b04
EG
4170 return 0;
4171}
a2fbb9ea 4172
34f80b04
EG
4173static int bnx2x_init_port(struct bnx2x *bp)
4174{
4175 int port = BP_PORT(bp);
94a78b79 4176 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 4177 u32 low, high;
34f80b04 4178 u32 val;
a2fbb9ea 4179
cdaa7cb8 4180 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
4181
4182 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 4183
94a78b79 4184 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 4185 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
4186
4187 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4188 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4189 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 4190 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 4191
37b091ba
MC
4192#ifdef BCM_CNIC
4193 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 4194
94a78b79 4195 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
4196 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4197 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 4198#endif
cdaa7cb8 4199
94a78b79 4200 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 4201
94a78b79 4202 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
4203 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4204 /* no pause for emulation and FPGA */
4205 low = 0;
4206 high = 513;
4207 } else {
4208 if (IS_E1HMF(bp))
4209 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4210 else if (bp->dev->mtu > 4096) {
4211 if (bp->flags & ONE_PORT_FLAG)
4212 low = 160;
4213 else {
4214 val = bp->dev->mtu;
4215 /* (24*1024 + val*4)/256 */
4216 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4217 }
4218 } else
4219 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4220 high = low + 56; /* 14*1024/256 */
4221 }
4222 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4223 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4224
4225
94a78b79 4226 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 4227
94a78b79 4228 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 4229 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 4230 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 4231 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 4232
94a78b79
VZ
4233 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4234 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4235 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4236 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 4237
94a78b79 4238 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 4239 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 4240
94a78b79 4241 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
4242
4243 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 4244 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
4245
4246 /* update threshold */
34f80b04 4247 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 4248 /* update init credit */
34f80b04 4249 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
4250
4251 /* probe changes */
34f80b04 4252 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 4253 msleep(5);
34f80b04 4254 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 4255
37b091ba
MC
4256#ifdef BCM_CNIC
4257 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 4258#endif
94a78b79 4259 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 4260 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
4261
4262 if (CHIP_IS_E1(bp)) {
4263 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4264 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4265 }
94a78b79 4266 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 4267
94a78b79 4268 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
4269 /* init aeu_mask_attn_func_0/1:
4270 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4271 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4272 * bits 4-7 are used for "per vn group attention" */
4273 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4274 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4275
94a78b79 4276 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 4277 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 4278 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 4279 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 4280 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 4281
94a78b79 4282 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
4283
4284 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4285
4286 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
4287 /* 0x2 disable e1hov, 0x1 enable */
4288 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4289 (IS_E1HMF(bp) ? 0x1 : 0x2));
4290
1c06328c
EG
4291 {
4292 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4293 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4294 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4295 }
34f80b04
EG
4296 }
4297
94a78b79 4298 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 4299 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 4300
35b19ba5 4301 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
4302 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4303 {
4304 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4305
4306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4307 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4308
4309 /* The GPIO should be swapped if the swap register is
4310 set and active */
4311 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4312 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4313
4314 /* Select function upon port-swap configuration */
4315 if (port == 0) {
4316 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4317 aeu_gpio_mask = (swap_val && swap_override) ?
4318 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4319 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4320 } else {
4321 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4322 aeu_gpio_mask = (swap_val && swap_override) ?
4323 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4325 }
4326 val = REG_RD(bp, offset);
4327 /* add GPIO3 to group */
4328 val |= aeu_gpio_mask;
4329 REG_WR(bp, offset, val);
4330 }
3971a230 4331 bp->port.need_hw_lock = 1;
589abe3a
EG
4332 break;
4333
4d295db0 4334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3971a230
YR
4335 bp->port.need_hw_lock = 1;
4336 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647 4337 /* add SPIO 5 to group 0 */
4d295db0
EG
4338 {
4339 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4340 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4341 val = REG_RD(bp, reg_addr);
f1410647 4342 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
4343 REG_WR(bp, reg_addr, val);
4344 }
f1410647 4345 break;
3971a230
YR
4346 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4348 bp->port.need_hw_lock = 1;
4349 break;
f1410647
ET
4350 default:
4351 break;
4352 }
4353
c18487ee 4354 bnx2x__link_reset(bp);
a2fbb9ea 4355
34f80b04
EG
4356 return 0;
4357}
4358
4359#define ILT_PER_FUNC (768/2)
4360#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4361/* the phys address is shifted right 12 bits and has an added
4362 1=valid bit added to the 53rd bit
4363 then since this is a wide register(TM)
4364 we split it into two 32 bit writes
4365 */
4366#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4367#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4368#define PXP_ONE_ILT(x) (((x) << 10) | x)
4369#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4370
37b091ba
MC
4371#ifdef BCM_CNIC
4372#define CNIC_ILT_LINES 127
4373#define CNIC_CTX_PER_ILT 16
4374#else
34f80b04 4375#define CNIC_ILT_LINES 0
37b091ba 4376#endif
34f80b04
EG
4377
4378static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4379{
4380 int reg;
4381
4382 if (CHIP_IS_E1H(bp))
4383 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4384 else /* E1 */
4385 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4386
4387 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4388}
4389
4390static int bnx2x_init_func(struct bnx2x *bp)
4391{
4392 int port = BP_PORT(bp);
4393 int func = BP_FUNC(bp);
8badd27a 4394 u32 addr, val;
34f80b04
EG
4395 int i;
4396
cdaa7cb8 4397 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 4398
8badd27a
EG
4399 /* set MSI reconfigure capability */
4400 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4401 val = REG_RD(bp, addr);
4402 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4403 REG_WR(bp, addr, val);
4404
34f80b04
EG
4405 i = FUNC_ILT_BASE(func);
4406
4407 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4408 if (CHIP_IS_E1H(bp)) {
4409 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4410 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4411 } else /* E1 */
4412 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4413 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4414
37b091ba
MC
4415#ifdef BCM_CNIC
4416 i += 1 + CNIC_ILT_LINES;
4417 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4418 if (CHIP_IS_E1(bp))
4419 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4420 else {
4421 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4422 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4423 }
4424
4425 i++;
4426 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4427 if (CHIP_IS_E1(bp))
4428 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4429 else {
4430 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4431 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4432 }
4433
4434 i++;
4435 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4436 if (CHIP_IS_E1(bp))
4437 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4438 else {
4439 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4440 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4441 }
4442
4443 /* tell the searcher where the T2 table is */
4444 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4445
4446 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4447 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4448
4449 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4450 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4451 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4452
4453 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4454#endif
34f80b04
EG
4455
4456 if (CHIP_IS_E1H(bp)) {
573f2035
EG
4457 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4458 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4459 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4460 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4461 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4462 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4463 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4464 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4465 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
4466
4467 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4468 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4469 }
4470
4471 /* HC init per function */
4472 if (CHIP_IS_E1H(bp)) {
4473 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4474
4475 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4476 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4477 }
94a78b79 4478 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 4479
c14423fe 4480 /* Reset PCIE errors for debug */
a2fbb9ea
ET
4481 REG_WR(bp, 0x2114, 0xffffffff);
4482 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 4483
34f80b04
EG
4484 return 0;
4485}
4486
9f6c9258 4487int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04
EG
4488{
4489 int i, rc = 0;
a2fbb9ea 4490
34f80b04
EG
4491 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4492 BP_FUNC(bp), load_code);
a2fbb9ea 4493
34f80b04
EG
4494 bp->dmae_ready = 0;
4495 mutex_init(&bp->dmae_mutex);
54016b26
EG
4496 rc = bnx2x_gunzip_init(bp);
4497 if (rc)
4498 return rc;
a2fbb9ea 4499
34f80b04
EG
4500 switch (load_code) {
4501 case FW_MSG_CODE_DRV_LOAD_COMMON:
4502 rc = bnx2x_init_common(bp);
4503 if (rc)
4504 goto init_hw_err;
4505 /* no break */
4506
4507 case FW_MSG_CODE_DRV_LOAD_PORT:
4508 bp->dmae_ready = 1;
4509 rc = bnx2x_init_port(bp);
4510 if (rc)
4511 goto init_hw_err;
4512 /* no break */
4513
4514 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4515 bp->dmae_ready = 1;
4516 rc = bnx2x_init_func(bp);
4517 if (rc)
4518 goto init_hw_err;
4519 break;
4520
4521 default:
4522 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4523 break;
4524 }
4525
4526 if (!BP_NOMCP(bp)) {
4527 int func = BP_FUNC(bp);
a2fbb9ea
ET
4528
4529 bp->fw_drv_pulse_wr_seq =
34f80b04 4530 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 4531 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
4532 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4533 }
a2fbb9ea 4534
34f80b04
EG
4535 /* this needs to be done before gunzip end */
4536 bnx2x_zero_def_sb(bp);
4537 for_each_queue(bp, i)
4538 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
4539#ifdef BCM_CNIC
4540 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4541#endif
34f80b04
EG
4542
4543init_hw_err:
4544 bnx2x_gunzip_end(bp);
4545
4546 return rc;
a2fbb9ea
ET
4547}
4548
9f6c9258 4549void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
4550{
4551
4552#define BNX2X_PCI_FREE(x, y, size) \
4553 do { \
4554 if (x) { \
1a983142 4555 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
4556 x = NULL; \
4557 y = 0; \
4558 } \
4559 } while (0)
4560
4561#define BNX2X_FREE(x) \
4562 do { \
4563 if (x) { \
4564 vfree(x); \
4565 x = NULL; \
4566 } \
4567 } while (0)
4568
4569 int i;
4570
4571 /* fastpath */
555f6c78 4572 /* Common */
a2fbb9ea
ET
4573 for_each_queue(bp, i) {
4574
555f6c78 4575 /* status blocks */
a2fbb9ea
ET
4576 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4577 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 4578 sizeof(struct host_status_block));
555f6c78
EG
4579 }
4580 /* Rx */
54b9ddaa 4581 for_each_queue(bp, i) {
a2fbb9ea 4582
555f6c78 4583 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
4584 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4585 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4586 bnx2x_fp(bp, i, rx_desc_mapping),
4587 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4588
4589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4590 bnx2x_fp(bp, i, rx_comp_mapping),
4591 sizeof(struct eth_fast_path_rx_cqe) *
4592 NUM_RCQ_BD);
a2fbb9ea 4593
7a9b2557 4594 /* SGE ring */
32626230 4595 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
4596 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4597 bnx2x_fp(bp, i, rx_sge_mapping),
4598 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4599 }
555f6c78 4600 /* Tx */
54b9ddaa 4601 for_each_queue(bp, i) {
555f6c78
EG
4602
4603 /* fastpath tx rings: tx_buf tx_desc */
4604 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4605 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4606 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 4607 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 4608 }
a2fbb9ea
ET
4609 /* end of fastpath */
4610
4611 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 4612 sizeof(struct host_def_status_block));
a2fbb9ea
ET
4613
4614 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 4615 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4616
37b091ba 4617#ifdef BCM_CNIC
a2fbb9ea
ET
4618 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4619 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4620 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4621 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
4622 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4623 sizeof(struct host_status_block));
a2fbb9ea 4624#endif
7a9b2557 4625 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
4626
4627#undef BNX2X_PCI_FREE
4628#undef BNX2X_KFREE
4629}
4630
9f6c9258 4631int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea
ET
4632{
4633
4634#define BNX2X_PCI_ALLOC(x, y, size) \
4635 do { \
1a983142 4636 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
4637 if (x == NULL) \
4638 goto alloc_mem_err; \
4639 memset(x, 0, size); \
4640 } while (0)
a2fbb9ea 4641
9f6c9258
DK
4642#define BNX2X_ALLOC(x, size) \
4643 do { \
4644 x = vmalloc(size); \
4645 if (x == NULL) \
4646 goto alloc_mem_err; \
4647 memset(x, 0, size); \
4648 } while (0)
a2fbb9ea 4649
9f6c9258 4650 int i;
a2fbb9ea 4651
9f6c9258
DK
4652 /* fastpath */
4653 /* Common */
a2fbb9ea 4654 for_each_queue(bp, i) {
9f6c9258 4655 bnx2x_fp(bp, i, bp) = bp;
a2fbb9ea 4656
9f6c9258
DK
4657 /* status blocks */
4658 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4659 &bnx2x_fp(bp, i, status_blk_mapping),
4660 sizeof(struct host_status_block));
a2fbb9ea 4661 }
9f6c9258
DK
4662 /* Rx */
4663 for_each_queue(bp, i) {
a2fbb9ea 4664
9f6c9258
DK
4665 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4666 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4667 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4668 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4669 &bnx2x_fp(bp, i, rx_desc_mapping),
4670 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 4671
9f6c9258
DK
4672 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4673 &bnx2x_fp(bp, i, rx_comp_mapping),
4674 sizeof(struct eth_fast_path_rx_cqe) *
4675 NUM_RCQ_BD);
a2fbb9ea 4676
9f6c9258
DK
4677 /* SGE ring */
4678 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4679 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4680 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4681 &bnx2x_fp(bp, i, rx_sge_mapping),
4682 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4683 }
4684 /* Tx */
4685 for_each_queue(bp, i) {
8badd27a 4686
9f6c9258
DK
4687 /* fastpath tx rings: tx_buf tx_desc */
4688 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4689 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4690 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4691 &bnx2x_fp(bp, i, tx_desc_mapping),
4692 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 4693 }
9f6c9258 4694 /* end of fastpath */
8badd27a 4695
9f6c9258
DK
4696 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4697 sizeof(struct host_def_status_block));
8badd27a 4698
9f6c9258
DK
4699 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4700 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4701
9f6c9258
DK
4702#ifdef BCM_CNIC
4703 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
8badd27a 4704
9f6c9258
DK
4705 /* allocate searcher T2 table
4706 we allocate 1/4 of alloc num for T2
4707 (which is not entered into the ILT) */
4708 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
a2fbb9ea 4709
9f6c9258
DK
4710 /* Initialize T2 (for 1024 connections) */
4711 for (i = 0; i < 16*1024; i += 64)
4712 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 4713
9f6c9258
DK
4714 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4715 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
65abd74d 4716
9f6c9258
DK
4717 /* QM queues (128*MAX_CONN) */
4718 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
65abd74d 4719
9f6c9258
DK
4720 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4721 sizeof(struct host_status_block));
4722#endif
65abd74d 4723
9f6c9258
DK
4724 /* Slow path ring */
4725 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 4726
9f6c9258 4727 return 0;
e1510706 4728
9f6c9258
DK
4729alloc_mem_err:
4730 bnx2x_free_mem(bp);
4731 return -ENOMEM;
e1510706 4732
9f6c9258
DK
4733#undef BNX2X_PCI_ALLOC
4734#undef BNX2X_ALLOC
65abd74d
YG
4735}
4736
65abd74d 4737
a2fbb9ea
ET
4738/*
4739 * Init service functions
4740 */
4741
e665bfda
MC
4742/**
4743 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4744 *
4745 * @param bp driver descriptor
4746 * @param set set or clear an entry (1 or 0)
4747 * @param mac pointer to a buffer containing a MAC
4748 * @param cl_bit_vec bit vector of clients to register a MAC for
4749 * @param cam_offset offset in a CAM to use
4750 * @param with_bcast set broadcast MAC as well
4751 */
4752static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4753 u32 cl_bit_vec, u8 cam_offset,
4754 u8 with_bcast)
a2fbb9ea
ET
4755{
4756 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 4757 int port = BP_PORT(bp);
a2fbb9ea
ET
4758
4759 /* CAM allocation
4760 * unicasts 0-31:port0 32-63:port1
4761 * multicast 64-127:port0 128-191:port1
4762 */
e665bfda
MC
4763 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4764 config->hdr.offset = cam_offset;
4765 config->hdr.client_id = 0xff;
a2fbb9ea
ET
4766 config->hdr.reserved1 = 0;
4767
4768 /* primary MAC */
4769 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 4770 swab16(*(u16 *)&mac[0]);
a2fbb9ea 4771 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 4772 swab16(*(u16 *)&mac[2]);
a2fbb9ea 4773 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 4774 swab16(*(u16 *)&mac[4]);
34f80b04 4775 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
4776 if (set)
4777 config->config_table[0].target_table_entry.flags = 0;
4778 else
4779 CAM_INVALIDATE(config->config_table[0]);
ca00392c 4780 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 4781 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
4782 config->config_table[0].target_table_entry.vlan_id = 0;
4783
3101c2bc
YG
4784 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4785 (set ? "setting" : "clearing"),
a2fbb9ea
ET
4786 config->config_table[0].cam_entry.msb_mac_addr,
4787 config->config_table[0].cam_entry.middle_mac_addr,
4788 config->config_table[0].cam_entry.lsb_mac_addr);
4789
4790 /* broadcast */
e665bfda
MC
4791 if (with_bcast) {
4792 config->config_table[1].cam_entry.msb_mac_addr =
4793 cpu_to_le16(0xffff);
4794 config->config_table[1].cam_entry.middle_mac_addr =
4795 cpu_to_le16(0xffff);
4796 config->config_table[1].cam_entry.lsb_mac_addr =
4797 cpu_to_le16(0xffff);
4798 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4799 if (set)
4800 config->config_table[1].target_table_entry.flags =
4801 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4802 else
4803 CAM_INVALIDATE(config->config_table[1]);
4804 config->config_table[1].target_table_entry.clients_bit_vector =
4805 cpu_to_le32(cl_bit_vec);
4806 config->config_table[1].target_table_entry.vlan_id = 0;
4807 }
a2fbb9ea
ET
4808
4809 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4810 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4811 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4812}
4813
e665bfda
MC
4814/**
4815 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4816 *
4817 * @param bp driver descriptor
4818 * @param set set or clear an entry (1 or 0)
4819 * @param mac pointer to a buffer containing a MAC
4820 * @param cl_bit_vec bit vector of clients to register a MAC for
4821 * @param cam_offset offset in a CAM to use
4822 */
4823static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4824 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
4825{
4826 struct mac_configuration_cmd_e1h *config =
4827 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4828
8d9c5f34 4829 config->hdr.length = 1;
e665bfda
MC
4830 config->hdr.offset = cam_offset;
4831 config->hdr.client_id = 0xff;
34f80b04
EG
4832 config->hdr.reserved1 = 0;
4833
4834 /* primary MAC */
4835 config->config_table[0].msb_mac_addr =
e665bfda 4836 swab16(*(u16 *)&mac[0]);
34f80b04 4837 config->config_table[0].middle_mac_addr =
e665bfda 4838 swab16(*(u16 *)&mac[2]);
34f80b04 4839 config->config_table[0].lsb_mac_addr =
e665bfda 4840 swab16(*(u16 *)&mac[4]);
ca00392c 4841 config->config_table[0].clients_bit_vector =
e665bfda 4842 cpu_to_le32(cl_bit_vec);
34f80b04
EG
4843 config->config_table[0].vlan_id = 0;
4844 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
4845 if (set)
4846 config->config_table[0].flags = BP_PORT(bp);
4847 else
4848 config->config_table[0].flags =
4849 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 4850
e665bfda 4851 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 4852 (set ? "setting" : "clearing"),
34f80b04
EG
4853 config->config_table[0].msb_mac_addr,
4854 config->config_table[0].middle_mac_addr,
e665bfda 4855 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
4856
4857 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4858 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4859 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4860}
4861
a2fbb9ea
ET
/* Wait (or actively poll) until *state_p — which is updated from
 * bnx2x_sp_event() — reaches 'state'. Retries up to 5000 times with a
 * 1ms sleep between attempts (~5s total).
 *
 * @param bp	  driver descriptor
 * @param state   state value to wait for
 * @param idx	  fastpath index the reply is expected on (0 = default)
 * @param state_p pointer to the state variable being watched
 * @param poll	  non-zero to actively service the RX rings while waiting
 *
 * @return 0 on success, -EIO if the driver paniced, -EBUSY on timeout.
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* bail out early if a panic was triggered meanwhile */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
4906
9f6c9258 4907void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
e665bfda
MC
4908{
4909 bp->set_mac_pending++;
4910 smp_wmb();
4911
4912 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4913 (1 << bp->fp->cl_id), BP_FUNC(bp));
4914
4915 /* Wait for a completion */
4916 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4917}
4918
9f6c9258 4919void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
e665bfda
MC
4920{
4921 bp->set_mac_pending++;
4922 smp_wmb();
4923
4924 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4925 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4926 1);
4927
4928 /* Wait for a completion */
4929 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4930}
4931
993ac7b5
MC
4932#ifdef BCM_CNIC
4933/**
4934 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
4935 * MAC(s). This function will wait until the ramdord completion
4936 * returns.
4937 *
4938 * @param bp driver handle
4939 * @param set set or clear the CAM entry
4940 *
4941 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
4942 */
9f6c9258 4943int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5
MC
4944{
4945 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4946
4947 bp->set_mac_pending++;
4948 smp_wmb();
4949
4950 /* Send a SET_MAC ramrod */
4951 if (CHIP_IS_E1(bp))
4952 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4953 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4954 1);
4955 else
4956 /* CAM allocation for E1H
4957 * unicasts: by func number
4958 * multicast: 20+FUNC*20, 20 each
4959 */
4960 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4961 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4962
4963 /* Wait for a completion when setting */
4964 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4965
4966 return 0;
4967}
4968#endif
4969
9f6c9258 4970int bnx2x_setup_leading(struct bnx2x *bp)
a2fbb9ea 4971{
34f80b04 4972 int rc;
a2fbb9ea 4973
c14423fe 4974 /* reset IGU state */
34f80b04 4975 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4976
4977 /* SETUP ramrod */
4978 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4979
34f80b04
EG
4980 /* Wait for completion */
4981 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 4982
34f80b04 4983 return rc;
a2fbb9ea
ET
4984}
4985
9f6c9258 4986int bnx2x_setup_multi(struct bnx2x *bp, int index)
a2fbb9ea 4987{
555f6c78
EG
4988 struct bnx2x_fastpath *fp = &bp->fp[index];
4989
a2fbb9ea 4990 /* reset IGU state */
555f6c78 4991 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 4992
228241eb 4993 /* SETUP ramrod */
555f6c78
EG
4994 fp->state = BNX2X_FP_STATE_OPENING;
4995 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4996 fp->cl_id, 0);
a2fbb9ea
ET
4997
4998 /* Wait for completion */
4999 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 5000 &(fp->state), 0);
a2fbb9ea
ET
5001}
5002
a2fbb9ea 5003
9f6c9258 5004void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 5005{
ca00392c
EG
5006
5007 switch (bp->multi_mode) {
5008 case ETH_RSS_MODE_DISABLED:
54b9ddaa 5009 bp->num_queues = 1;
ca00392c
EG
5010 break;
5011
5012 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
5013 if (num_queues)
5014 bp->num_queues = min_t(u32, num_queues,
5015 BNX2X_MAX_QUEUES(bp));
ca00392c 5016 else
54b9ddaa
VZ
5017 bp->num_queues = min_t(u32, num_online_cpus(),
5018 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
5019 break;
5020
5021
5022 default:
54b9ddaa 5023 bp->num_queues = 1;
9f6c9258
DK
5024 break;
5025 }
a2fbb9ea
ET
5026}
5027
9f6c9258
DK
5028
5029
a2fbb9ea
ET
5030static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5031{
555f6c78 5032 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
5033 int rc;
5034
c14423fe 5035 /* halt the connection */
555f6c78
EG
5036 fp->state = BNX2X_FP_STATE_HALTING;
5037 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 5038
34f80b04 5039 /* Wait for completion */
a2fbb9ea 5040 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 5041 &(fp->state), 1);
c14423fe 5042 if (rc) /* timeout */
a2fbb9ea
ET
5043 return rc;
5044
5045 /* delete cfc entry */
5046 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5047
34f80b04
EG
5048 /* Wait for completion */
5049 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 5050 &(fp->state), 1);
34f80b04 5051 return rc;
a2fbb9ea
ET
5052}
5053
/* Tear down the leading (default) connection: HALT it, then post a
 * PORT_DELETE ramrod and poll the default status block producer for
 * its completion.
 *
 * @return 0 on success, negative errno on HALT or PORT_DEL timeout.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the PORT_DEL completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	/* mark the function as closed whether or not the wait timed out;
	 * the chip is about to be reset anyway */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
5103
34f80b04
EG
/* Reset the per-function HW resources: mask the IGU edges, stop the
 * CNIC timer scan (when built in) and clear this function's ILT range. */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
5132
/* Reset the per-port HW resources: mask NIG/AEU interrupts, stop
 * traffic into the BRB, then verify the BRB has drained. */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking occupancy */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
5158
34f80b04
EG
5159static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5160{
5161 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5162 BP_FUNC(bp), reset_code);
5163
5164 switch (reset_code) {
5165 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5166 bnx2x_reset_port(bp);
5167 bnx2x_reset_func(bp);
5168 bnx2x_reset_common(bp);
5169 break;
5170
5171 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5172 bnx2x_reset_port(bp);
5173 bnx2x_reset_func(bp);
5174 break;
5175
5176 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5177 bnx2x_reset_func(bp);
5178 break;
49d66772 5179
34f80b04
EG
5180 default:
5181 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5182 break;
5183 }
5184}
5185
9f6c9258 5186void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 5187{
da5a662a 5188 int port = BP_PORT(bp);
a2fbb9ea 5189 u32 reset_code = 0;
da5a662a 5190 int i, cnt, rc;
a2fbb9ea 5191
555f6c78 5192 /* Wait until tx fastpath tasks complete */
54b9ddaa 5193 for_each_queue(bp, i) {
228241eb
ET
5194 struct bnx2x_fastpath *fp = &bp->fp[i];
5195
34f80b04 5196 cnt = 1000;
e8b5fc51 5197 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 5198
7961f791 5199 bnx2x_tx_int(fp);
34f80b04
EG
5200 if (!cnt) {
5201 BNX2X_ERR("timeout waiting for queue[%d]\n",
5202 i);
5203#ifdef BNX2X_STOP_ON_ERROR
5204 bnx2x_panic();
5205 return -EBUSY;
5206#else
5207 break;
5208#endif
5209 }
5210 cnt--;
da5a662a 5211 msleep(1);
34f80b04 5212 }
228241eb 5213 }
da5a662a
VZ
5214 /* Give HW time to discard old tx messages */
5215 msleep(1);
a2fbb9ea 5216
3101c2bc
YG
5217 if (CHIP_IS_E1(bp)) {
5218 struct mac_configuration_cmd *config =
5219 bnx2x_sp(bp, mcast_config);
5220
e665bfda 5221 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 5222
8d9c5f34 5223 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
5224 CAM_INVALIDATE(config->config_table[i]);
5225
8d9c5f34 5226 config->hdr.length = i;
3101c2bc
YG
5227 if (CHIP_REV_IS_SLOW(bp))
5228 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5229 else
5230 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 5231 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
5232 config->hdr.reserved1 = 0;
5233
e665bfda
MC
5234 bp->set_mac_pending++;
5235 smp_wmb();
5236
3101c2bc
YG
5237 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5238 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5239 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5240
5241 } else { /* E1H */
65abd74d
YG
5242 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5243
e665bfda 5244 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
5245
5246 for (i = 0; i < MC_HASH_SIZE; i++)
5247 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
5248
5249 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 5250 }
993ac7b5
MC
5251#ifdef BCM_CNIC
5252 /* Clear iSCSI L2 MAC */
5253 mutex_lock(&bp->cnic_mutex);
5254 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5255 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5256 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5257 }
5258 mutex_unlock(&bp->cnic_mutex);
5259#endif
3101c2bc 5260
65abd74d
YG
5261 if (unload_mode == UNLOAD_NORMAL)
5262 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5263
7d0446c2 5264 else if (bp->flags & NO_WOL_FLAG)
65abd74d 5265 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 5266
7d0446c2 5267 else if (bp->wol) {
65abd74d
YG
5268 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5269 u8 *mac_addr = bp->dev->dev_addr;
5270 u32 val;
5271 /* The mac address is written to entries 1-4 to
5272 preserve entry 0 which is used by the PMF */
5273 u8 entry = (BP_E1HVN(bp) + 1)*8;
5274
5275 val = (mac_addr[0] << 8) | mac_addr[1];
5276 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5277
5278 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5279 (mac_addr[4] << 8) | mac_addr[5];
5280 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5281
5282 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5283
5284 } else
5285 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5286
34f80b04
EG
5287 /* Close multi and leading connections
5288 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
5289 for_each_nondefault_queue(bp, i)
5290 if (bnx2x_stop_multi(bp, i))
228241eb 5291 goto unload_error;
a2fbb9ea 5292
da5a662a
VZ
5293 rc = bnx2x_stop_leading(bp);
5294 if (rc) {
34f80b04 5295 BNX2X_ERR("Stop leading failed!\n");
da5a662a 5296#ifdef BNX2X_STOP_ON_ERROR
34f80b04 5297 return -EBUSY;
da5a662a
VZ
5298#else
5299 goto unload_error;
34f80b04 5300#endif
228241eb
ET
5301 }
5302
5303unload_error:
34f80b04 5304 if (!BP_NOMCP(bp))
228241eb 5305 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 5306 else {
f5372251 5307 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
5308 load_count[0], load_count[1], load_count[2]);
5309 load_count[0]--;
da5a662a 5310 load_count[1 + port]--;
f5372251 5311 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
5312 load_count[0], load_count[1], load_count[2]);
5313 if (load_count[0] == 0)
5314 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 5315 else if (load_count[1 + port] == 0)
34f80b04
EG
5316 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5317 else
5318 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5319 }
a2fbb9ea 5320
34f80b04
EG
5321 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5322 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5323 bnx2x__link_reset(bp);
a2fbb9ea
ET
5324
5325 /* Reset the chip */
228241eb 5326 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
5327
5328 /* Report UNLOAD_DONE to MCP */
34f80b04 5329 if (!BP_NOMCP(bp))
a2fbb9ea 5330 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 5331
72fd0718
VZ
5332}
5333
9f6c9258 5334void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
5335{
5336 u32 val;
5337
5338 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5339
5340 if (CHIP_IS_E1(bp)) {
5341 int port = BP_PORT(bp);
5342 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5343 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5344
5345 val = REG_RD(bp, addr);
5346 val &= ~(0x300);
5347 REG_WR(bp, addr, val);
5348 } else if (CHIP_IS_E1H(bp)) {
5349 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5350 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5351 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5352 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5353 }
5354}
5355
72fd0718
VZ
5356
5357/* Close gates #2, #3 and #4: */
5358static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5359{
5360 u32 val, addr;
5361
5362 /* Gates #2 and #4a are closed/opened for "not E1" only */
5363 if (!CHIP_IS_E1(bp)) {
5364 /* #4 */
5365 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5366 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5367 close ? (val | 0x1) : (val & (~(u32)1)));
5368 /* #2 */
5369 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5370 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5371 close ? (val | 0x1) : (val & (~(u32)1)));
5372 }
5373
5374 /* #3 */
5375 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5376 val = REG_RD(bp, addr);
5377 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5378
5379 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5380 close ? "closing" : "opening");
5381 mmiowb();
5382}
5383
5384#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5385
5386static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5387{
5388 /* Do some magic... */
5389 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5390 *magic_val = val & SHARED_MF_CLP_MAGIC;
5391 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5392}
5393
5394/* Restore the value of the `magic' bit.
5395 *
5396 * @param pdev Device handle.
5397 * @param magic_val Old value of the `magic' bit.
5398 */
5399static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5400{
5401 /* Restore the `magic' bit value... */
5402 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5403 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5404 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5405 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5406 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5407 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5408}
5409
5410/* Prepares for MCP reset: takes care of CLP configurations.
5411 *
5412 * @param bp
5413 * @param magic_val Old value of 'magic' bit.
5414 */
5415static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5416{
5417 u32 shmem;
5418 u32 validity_offset;
5419
5420 DP(NETIF_MSG_HW, "Starting\n");
5421
5422 /* Set `magic' bit in order to save MF config */
5423 if (!CHIP_IS_E1(bp))
5424 bnx2x_clp_reset_prep(bp, magic_val);
5425
5426 /* Get shmem offset */
5427 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5428 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5429
5430 /* Clear validity map flags */
5431 if (shmem > 0)
5432 REG_WR(bp, shmem + validity_offset, 0);
5433}
5434
5435#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5436#define MCP_ONE_TIMEOUT 100 /* 100 ms */
5437
5438/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5439 * depending on the HW type.
5440 *
5441 * @param bp
5442 */
5443static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5444{
5445 /* special handling for emulation and FPGA,
5446 wait 10 times longer */
5447 if (CHIP_REV_IS_SLOW(bp))
5448 msleep(MCP_ONE_TIMEOUT*10);
5449 else
5450 msleep(MCP_ONE_TIMEOUT);
5451}
5452
/* Wait for the MCP to come back up after a reset by polling the shmem
 * validity map, then restore the CLP `magic' bit saved by
 * bnx2x_reset_mcp_prep().
 *
 * @param bp	    driver handle
 * @param magic_val value saved by bnx2x_reset_mcp_prep()
 *
 * @return 0 when the MCP signature appears, -ENOTTY otherwise.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	/* val holds the last map read; the loop always runs at least once */
	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
5504
5505static void bnx2x_pxp_prep(struct bnx2x *bp)
5506{
5507 if (!CHIP_IS_E1(bp)) {
5508 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5509 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5510 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5511 mmiowb();
5512 }
5513}
5514
5515/*
5516 * Reset the whole chip except for:
5517 * - PCIE core
5518 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5519 * one reset bit)
5520 * - IGU
5521 * - MISC (including AEU)
5522 * - GRC
5523 * - RBCN, RBCP
5524 */
5525static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5526{
5527 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5528
5529 not_reset_mask1 =
5530 MISC_REGISTERS_RESET_REG_1_RST_HC |
5531 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5532 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5533
5534 not_reset_mask2 =
5535 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5536 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5537 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5538 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5539 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5540 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5541 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5542 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5543
5544 reset_mask1 = 0xffffffff;
5545
5546 if (CHIP_IS_E1(bp))
5547 reset_mask2 = 0xffff;
5548 else
5549 reset_mask2 = 0x1ffff;
5550
5551 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5552 reset_mask1 & (~not_reset_mask1));
5553 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5554 reset_mask2 & (~not_reset_mask2));
5555
5556 barrier();
5557 mmiowb();
5558
5559 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5560 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5561 mmiowb();
5562}
5563
/* Perform the "process kill" recovery: drain the PXP2 Tetris buffer,
 * close gates #2-#4, reset the chip (sparing PCIe/IGU/MISC/GRC blocks),
 * wait for the MCP to come back and reopen the gates.
 *
 * @return 0 on success, -EAGAIN if the drain or MCP recovery times out.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	/* cnt goes negative only when the loop exhausted without a break */
	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
5645
72fd0718
VZ
5646static int bnx2x_leader_reset(struct bnx2x *bp)
5647{
5648 int rc = 0;
5649 /* Try to recover after the failure */
5650 if (bnx2x_process_kill(bp)) {
5651 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
5652 bp->dev->name);
5653 rc = -EAGAIN;
5654 goto exit_leader_reset;
5655 }
5656
5657 /* Clear "reset is in progress" bit and update the driver state */
5658 bnx2x_set_reset_done(bp);
5659 bp->recovery_state = BNX2X_RECOVERY_DONE;
5660
5661exit_leader_reset:
5662 bp->is_leader = 0;
5663 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5664 smp_wmb();
5665 return rc;
5666}
5667
72fd0718
VZ
5668/* Assumption: runs under rtnl lock. This together with the fact
5669 * that it's called only from bnx2x_reset_task() ensure that it
5670 * will never be called when netif_running(bp->dev) is false.
5671 */
/* Parity-error recovery state machine. One function becomes the
 * "leader" (holder of HW lock RESERVED_08) and performs the chip-wide
 * "process kill"; the others wait for it to finish and then reload.
 * Runs under rtnl lock, only from bnx2x_reset_task(), so
 * netif_running(bp->dev) is guaranteed true on entry.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
						  HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			/* loop again into the WAIT state */
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
			/* unreachable: every path above returns or breaks */
		default:
			return;
		}
	}
}
5771
5772/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5773 * scheduled on a general queue in order to prevent a dead lock.
5774 */
34f80b04
EG
5775static void bnx2x_reset_task(struct work_struct *work)
5776{
72fd0718 5777 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
5778
5779#ifdef BNX2X_STOP_ON_ERROR
5780 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5781 " so reset not done to allow debug dump,\n"
72fd0718 5782 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
5783 return;
5784#endif
5785
5786 rtnl_lock();
5787
5788 if (!netif_running(bp->dev))
5789 goto reset_task_exit;
5790
72fd0718
VZ
5791 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5792 bnx2x_parity_recover(bp);
5793 else {
5794 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5795 bnx2x_nic_load(bp, LOAD_NORMAL);
5796 }
34f80b04
EG
5797
5798reset_task_exit:
5799 rtnl_unlock();
5800}
5801
a2fbb9ea
ET
5802/* end of nic load/unload */
5803
a2fbb9ea
ET
5804/*
5805 * Init service functions
5806 */
5807
f1ef27ef
EG
5808static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5809{
5810 switch (func) {
5811 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5812 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5813 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5814 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5815 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5816 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5817 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5818 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5819 default:
5820 BNX2X_ERR("Unsupported function index: %d\n", func);
5821 return (u32)(-1);
5822 }
5823}
5824
/* Disable chip interrupts on E1H while temporarily "pretending" to be
 * PCI function 0 via the PGL pretend register, then restore the original
 * function. Each pretend write is read back and BUG()s on mismatch,
 * since continuing with the wrong pretend value would corrupt GRC access.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
5857
/* Disable chip interrupts for the UNDI unload flow: E1H chips need the
 * pretend-to-function-0 sequence, older chips can disable directly.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
5865
34f80b04
EG
/* Detect a still-loaded UNDI (pre-boot) driver and cleanly unload it on
 * both ports via the MCP mailbox, then reset the device while preserving
 * the NIG port-swap strap settings. Runs at probe time only.
 * NOTE(review): temporarily rewrites bp->func/bp->fw_seq to talk to the
 * MCP as functions 0 and 1; both are restored before returning.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
5964
/* Read chip-wide (port-independent) hardware info at probe time:
 * chip id/revision, single-port strapping, flash size, shared-memory
 * bases, bootcode version, WoL capability and the board part number.
 * Sets NO_MCP_FLAG and returns early when shmem looks invalid (no MCP).
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Single-port detection from chip id parity / strap register */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* Valid shmem must live in the 0xA0000-0xBFFFF window */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	/* WoL is only possible from VN 0 of an E1H multi-function device */
	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* Part number is stored as 4 consecutive 32-bit words in shmem */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
6063
/* Build bp->port.supported (ethtool SUPPORTED_* bitmask) from the NVRAM
 * switch configuration (1G SerDes vs 10G XGXS) and the external PHY type,
 * read the PHY MDIO address from the NIG, then mask the result by the
 * NVRAM speed capability mask. Returns early (leaving defaults) on any
 * NVRAM configuration error.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY address comes from the NIG, one per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY address comes from the NIG, one per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
6300
34f80b04 6301static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6302{
c18487ee 6303 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 6304
34f80b04 6305 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6306 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 6307 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 6308 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6309 bp->port.advertising = bp->port.supported;
a2fbb9ea 6310 } else {
c18487ee
YR
6311 u32 ext_phy_type =
6312 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6313
6314 if ((ext_phy_type ==
6315 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6316 (ext_phy_type ==
6317 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 6318 /* force 10G, no AN */
c18487ee 6319 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 6320 bp->port.advertising =
a2fbb9ea
ET
6321 (ADVERTISED_10000baseT_Full |
6322 ADVERTISED_FIBRE);
6323 break;
6324 }
6325 BNX2X_ERR("NVRAM config error. "
6326 "Invalid link_config 0x%x"
6327 " Autoneg not supported\n",
34f80b04 6328 bp->port.link_config);
a2fbb9ea
ET
6329 return;
6330 }
6331 break;
6332
6333 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 6334 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 6335 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
6336 bp->port.advertising = (ADVERTISED_10baseT_Full |
6337 ADVERTISED_TP);
a2fbb9ea 6338 } else {
cdaa7cb8
VZ
6339 BNX2X_ERROR("NVRAM config error. "
6340 "Invalid link_config 0x%x"
6341 " speed_cap_mask 0x%x\n",
6342 bp->port.link_config,
6343 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6344 return;
6345 }
6346 break;
6347
6348 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 6349 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
6350 bp->link_params.req_line_speed = SPEED_10;
6351 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6352 bp->port.advertising = (ADVERTISED_10baseT_Half |
6353 ADVERTISED_TP);
a2fbb9ea 6354 } else {
cdaa7cb8
VZ
6355 BNX2X_ERROR("NVRAM config error. "
6356 "Invalid link_config 0x%x"
6357 " speed_cap_mask 0x%x\n",
6358 bp->port.link_config,
6359 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6360 return;
6361 }
6362 break;
6363
6364 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 6365 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 6366 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
6367 bp->port.advertising = (ADVERTISED_100baseT_Full |
6368 ADVERTISED_TP);
a2fbb9ea 6369 } else {
cdaa7cb8
VZ
6370 BNX2X_ERROR("NVRAM config error. "
6371 "Invalid link_config 0x%x"
6372 " speed_cap_mask 0x%x\n",
6373 bp->port.link_config,
6374 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6375 return;
6376 }
6377 break;
6378
6379 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 6380 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
6381 bp->link_params.req_line_speed = SPEED_100;
6382 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6383 bp->port.advertising = (ADVERTISED_100baseT_Half |
6384 ADVERTISED_TP);
a2fbb9ea 6385 } else {
cdaa7cb8
VZ
6386 BNX2X_ERROR("NVRAM config error. "
6387 "Invalid link_config 0x%x"
6388 " speed_cap_mask 0x%x\n",
6389 bp->port.link_config,
6390 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6391 return;
6392 }
6393 break;
6394
6395 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 6396 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 6397 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
6398 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6399 ADVERTISED_TP);
a2fbb9ea 6400 } else {
cdaa7cb8
VZ
6401 BNX2X_ERROR("NVRAM config error. "
6402 "Invalid link_config 0x%x"
6403 " speed_cap_mask 0x%x\n",
6404 bp->port.link_config,
6405 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6406 return;
6407 }
6408 break;
6409
6410 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 6411 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 6412 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
6413 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6414 ADVERTISED_TP);
a2fbb9ea 6415 } else {
cdaa7cb8
VZ
6416 BNX2X_ERROR("NVRAM config error. "
6417 "Invalid link_config 0x%x"
6418 " speed_cap_mask 0x%x\n",
6419 bp->port.link_config,
6420 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6421 return;
6422 }
6423 break;
6424
6425 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6426 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6427 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 6428 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 6429 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
6430 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6431 ADVERTISED_FIBRE);
a2fbb9ea 6432 } else {
cdaa7cb8
VZ
6433 BNX2X_ERROR("NVRAM config error. "
6434 "Invalid link_config 0x%x"
6435 " speed_cap_mask 0x%x\n",
6436 bp->port.link_config,
6437 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6438 return;
6439 }
6440 break;
6441
6442 default:
cdaa7cb8
VZ
6443 BNX2X_ERROR("NVRAM config error. "
6444 "BAD link speed link_config 0x%x\n",
6445 bp->port.link_config);
c18487ee 6446 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6447 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
6448 break;
6449 }
a2fbb9ea 6450
34f80b04
EG
6451 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6452 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 6453 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 6454 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 6455 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 6456
c18487ee 6457 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 6458 " advertising 0x%x\n",
c18487ee
YR
6459 bp->link_params.req_line_speed,
6460 bp->link_params.req_duplex,
34f80b04 6461 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
6462}
6463
e665bfda
MC
6464static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6465{
6466 mac_hi = cpu_to_be16(mac_hi);
6467 mac_lo = cpu_to_be32(mac_lo);
6468 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6469 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6470}
6471
34f80b04 6472static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 6473{
34f80b04
EG
6474 int port = BP_PORT(bp);
6475 u32 val, val2;
589abe3a 6476 u32 config;
c2c8b03e 6477 u16 i;
01cd4528 6478 u32 ext_phy_type;
a2fbb9ea 6479
c18487ee 6480 bp->link_params.bp = bp;
34f80b04 6481 bp->link_params.port = port;
c18487ee 6482
c18487ee 6483 bp->link_params.lane_config =
a2fbb9ea 6484 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 6485 bp->link_params.ext_phy_config =
a2fbb9ea
ET
6486 SHMEM_RD(bp,
6487 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
6488 /* BCM8727_NOC => BCM8727 no over current */
6489 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6490 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6491 bp->link_params.ext_phy_config &=
6492 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6493 bp->link_params.ext_phy_config |=
6494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6495 bp->link_params.feature_config_flags |=
6496 FEATURE_CONFIG_BCM8727_NOC;
6497 }
6498
c18487ee 6499 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
6500 SHMEM_RD(bp,
6501 dev_info.port_hw_config[port].speed_capability_mask);
6502
34f80b04 6503 bp->port.link_config =
a2fbb9ea
ET
6504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6505
c2c8b03e
EG
6506 /* Get the 4 lanes xgxs config rx and tx */
6507 for (i = 0; i < 2; i++) {
6508 val = SHMEM_RD(bp,
6509 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6510 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6511 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6512
6513 val = SHMEM_RD(bp,
6514 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6515 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6516 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6517 }
6518
3ce2c3f9
EG
6519 /* If the device is capable of WoL, set the default state according
6520 * to the HW
6521 */
4d295db0 6522 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
6523 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6524 (config & PORT_FEATURE_WOL_ENABLED));
6525
c2c8b03e
EG
6526 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
6527 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
6528 bp->link_params.lane_config,
6529 bp->link_params.ext_phy_config,
34f80b04 6530 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 6531
4d295db0
EG
6532 bp->link_params.switch_cfg |= (bp->port.link_config &
6533 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 6534 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
6535
6536 bnx2x_link_settings_requested(bp);
6537
01cd4528
EG
6538 /*
6539 * If connected directly, work with the internal PHY, otherwise, work
6540 * with the external PHY
6541 */
6542 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6543 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6544 bp->mdio.prtad = bp->link_params.phy_addr;
6545
6546 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6547 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6548 bp->mdio.prtad =
659bc5c4 6549 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 6550
a2fbb9ea
ET
6551 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6552 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 6553 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
6554 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6555 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
6556
6557#ifdef BCM_CNIC
6558 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6559 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6560 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6561#endif
34f80b04
EG
6562}
6563
/* Top-level probe-time HW info gathering: common (chip-wide) info,
 * E1H multi-function (E1HOV/E1HMF) detection, port info, firmware
 * mailbox sequence, and the function MAC address (multi-function
 * override from mf_cfg, or a random MAC when no MCP is present).
 * Returns 0 on success or -EPERM on invalid multi-function config.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		/* Function 0's outer-VLAN tag decides whether the whole
		 * device runs in multi-function mode */
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* In multi-function mode the per-function MAC from mf_cfg
		 * overrides the port MAC read by bnx2x_get_port_hwinfo() */
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
6645
34f24c7f
VZ
/* Extract an OEM firmware version string from the PCI VPD: locate the
 * read-only VPD section, verify the manufacturer ID is Dell, then copy
 * the vendor-specific (V0) keyword into bp->fw_ver. On any parse
 * failure bp->fw_ver is simply left zeroed - this is best-effort.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	/* the ID may be stored upper- or lower-case hex; accept both */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound the copy to both fw_ver (32 bytes assumed
			 * from the "len < 32" check) and the VPD buffer */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
6709
34f80b04
EG
/* One-time software initialization of the driver-private state:
 * locks, work items, HW/FW info readout, module-parameter derived
 * settings and the slow-path timer.  Returns the status of
 * bnx2x_get_hwinfo(); all other steps cannot fail.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode - RSS requires MSI-X, so fall back to a
	 * single queue when the requested interrupt mode is INTx/MSI */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags (TPA == HW LRO aggregation) */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity
	 * (coalescing ticks must be a multiple of 4*BNX2X_BTR) */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* slow (emulation/FPGA) chips get a 5x longer timer period */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
6792
a2fbb9ea 6793
de0c62db
DK
6794/****************************************************************************
6795* General service functions
6796****************************************************************************/
a2fbb9ea 6797
bb2a0f7a 6798/* called with rtnl_lock */
a2fbb9ea
ET
/* net_device_ops .ndo_open callback (rtnl_lock held by the caller).
 * Powers the device up and, if a parity-error recovery is still in
 * flight, attempts to complete it before loading the NIC.
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do/while(0) is used only as a single-exit scope for
		 * the "break" below; it never loops */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shell proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery failed - power back down and tell the
			 * user to retry (or power-cycle) */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
6843
bb2a0f7a 6844/* called with rtnl_lock */
a2fbb9ea
ET
6845static int bnx2x_close(struct net_device *dev)
6846{
a2fbb9ea
ET
6847 struct bnx2x *bp = netdev_priv(dev);
6848
6849 /* Unload the driver, release IRQs */
bb2a0f7a 6850 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 6851 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
6852
6853 return 0;
6854}
6855
f5372251 6856/* called with netif_tx_lock from dev_mcast.c */
/* Program the chip's RX filtering according to dev->flags and the
 * multicast list.  E1 uses CAM entries written via a slow-path ramrod;
 * E1H uses a 256-bit CRC hash filter written directly to registers.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* nothing to program until the device is fully up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 falls back to all-multi when the list exceeds its CAM */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* fill one CAM entry per multicast address;
			 * MAC bytes are stored 16 bits at a time,
			 * byte-swapped for the hardware */
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
						cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			/* invalidate stale entries left over from a
			 * previously longer list */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* make the table visible before posting the ramrod */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts via a CRC32c-based
			 * hash filter: top 8 CRC bits select one of 256
			 * filter bits */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
6976
a2fbb9ea 6977
c18487ee 6978/* called with rtnl_lock */
01cd4528
EG
/* mdio_if_info .mdio_read hook (called with rtnl_lock held).
 * Performs a clause-45 read through the PHY lock.
 * Returns the 16-bit register value on success, negative errno on
 * failure (the register value is folded into the return code).
 */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* only the port address discovered at probe time is accepted */
	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	/* on success return the read value instead of 0 */
	if (!rc)
		rc = value;
	return rc;
}
a2fbb9ea 7009
01cd4528
EG
7010/* called with rtnl_lock */
/* mdio_if_info .mdio_write hook (called with rtnl_lock held).
 * Clause-45 write counterpart of bnx2x_mdio_read(); returns 0 or a
 * negative errno.
 */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* only the port address discovered at probe time is accepted */
	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
c18487ee 7036
01cd4528
EG
7037/* called with rtnl_lock */
7038static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7039{
7040 struct bnx2x *bp = netdev_priv(dev);
7041 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 7042
01cd4528
EG
7043 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7044 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 7045
01cd4528
EG
7046 if (!netif_running(dev))
7047 return -EAGAIN;
7048
7049 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
7050}
7051
257ddbda 7052#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
/* Netpoll hook: service the device with interrupts masked so that
 * netconsole and similar can make progress while IRQs are unusable.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
7061#endif
7062
c64213cd
SH
/* net_device callback table wired into every bnx2x netdev by
 * bnx2x_init_dev(). */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
7080
34f80b04
EG
/* Probe-time PCI/netdev plumbing: enable the device, claim and map
 * BAR0 (registers) and BAR2 (doorbells), configure DMA masks, set up
 * netdev features/ops and the MDIO interface.  On failure, resources
 * acquired so far are released via the goto-cleanup chain.
 * Returns 0 on success, negative errno otherwise.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* regions are shared between the functions of one chip - only
	 * the first enable claims them and saves config space */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA; remember it for NETIF_F_HIGHDMA below */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
7250
37f9ce62
EG
/* Read the negotiated PCIe link width and speed from the chip's
 * link-control register (for the probe banner). */
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
37f9ce62 7261
6891dd25 7262static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 7263{
37f9ce62 7264 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
7265 struct bnx2x_fw_file_hdr *fw_hdr;
7266 struct bnx2x_fw_file_section *sections;
94a78b79 7267 u32 offset, len, num_ops;
37f9ce62 7268 u16 *ops_offsets;
94a78b79 7269 int i;
37f9ce62 7270 const u8 *fw_ver;
94a78b79
VZ
7271
7272 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7273 return -EINVAL;
7274
7275 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7276 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7277
7278 /* Make sure none of the offsets and sizes make us read beyond
7279 * the end of the firmware data */
7280 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7281 offset = be32_to_cpu(sections[i].offset);
7282 len = be32_to_cpu(sections[i].len);
7283 if (offset + len > firmware->size) {
cdaa7cb8
VZ
7284 dev_err(&bp->pdev->dev,
7285 "Section %d length is out of bounds\n", i);
94a78b79
VZ
7286 return -EINVAL;
7287 }
7288 }
7289
7290 /* Likewise for the init_ops offsets */
7291 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7292 ops_offsets = (u16 *)(firmware->data + offset);
7293 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7294
7295 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7296 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
7297 dev_err(&bp->pdev->dev,
7298 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
7299 return -EINVAL;
7300 }
7301 }
7302
7303 /* Check FW version */
7304 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7305 fw_ver = firmware->data + offset;
7306 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7307 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7308 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7309 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
7310 dev_err(&bp->pdev->dev,
7311 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
7312 fw_ver[0], fw_ver[1], fw_ver[2],
7313 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7314 BCM_5710_FW_MINOR_VERSION,
7315 BCM_5710_FW_REVISION_VERSION,
7316 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 7317 return -EINVAL;
94a78b79
VZ
7318 }
7319
7320 return 0;
7321}
7322
ab6ad5a4 7323static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7324{
ab6ad5a4
EG
7325 const __be32 *source = (const __be32 *)_source;
7326 u32 *target = (u32 *)_target;
94a78b79 7327 u32 i;
94a78b79
VZ
7328
7329 for (i = 0; i < n/4; i++)
7330 target[i] = be32_to_cpu(source[i]);
7331}
7332
7333/*
7334 Ops array is stored in the following format:
7335 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7336 */
ab6ad5a4 7337static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 7338{
ab6ad5a4
EG
7339 const __be32 *source = (const __be32 *)_source;
7340 struct raw_op *target = (struct raw_op *)_target;
94a78b79 7341 u32 i, j, tmp;
94a78b79 7342
ab6ad5a4 7343 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
7344 tmp = be32_to_cpu(source[j]);
7345 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
7346 target[i].offset = tmp & 0xffffff;
7347 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
7348 }
7349}
ab6ad5a4
EG
7350
7351static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7352{
ab6ad5a4
EG
7353 const __be16 *source = (const __be16 *)_source;
7354 u16 *target = (u16 *)_target;
94a78b79 7355 u32 i;
94a78b79
VZ
7356
7357 for (i = 0; i < n/2; i++)
7358 target[i] = be16_to_cpu(source[i]);
7359}
7360
7995c64e
JP
/* Allocate bp->arr sized from the firmware header, then fill it from
 * the firmware image via the given conversion function.  On allocation
 * failure jumps to lbl, which is expected to unwind earlier
 * allocations.  Expects 'fw_hdr' and 'bp' in the caller's scope.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
94a78b79 7372
/* Request the chip-specific firmware file, validate it, and unpack its
 * init blobs/opcodes/offsets into byte-swapped driver buffers; the
 * STORM processor images are referenced in place inside bp->firmware.
 * On any failure the goto chain frees whatever was already allocated
 * and releases the firmware.  Returns 0 on success, negative errno
 * otherwise.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware - interrupt tables and program RAM images are
	 * used directly from the firmware buffer, no copies made */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
7444
7445
a2fbb9ea
ET
/* PCI probe entry point: allocate the multi-queue netdev, run the
 * device/bp initialization stages, register with the network stack
 * and print the probe banner.  Returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	/* bnx2x_init_dev cleans up after itself on failure - only the
	 * netdev needs freeing here */
	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
7509
/* PCI remove entry point: mirror of bnx2x_init_one() - unregister the
 * netdev, cancel pending reset work, unmap BARs and release PCI
 * resources. */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* regions are shared; release only on the last disable */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7540
f8ef6e44
YG
/* Minimal NIC teardown used by the PCI error-recovery (EEH) path:
 * unlike the normal unload it avoids touching the (possibly dead)
 * hardware beyond invalidating E1 CAM entries, and just releases
 * driver resources.  Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		/* invalidate the whole multicast CAM shadow */
		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
7579
/* Re-discover shared-memory (MCP) state after a PCI error reset:
 * re-locate shmem, validate its signature and resync the firmware
 * mailbox sequence number.  Sets NO_MCP_FLAG when no management CPU
 * is detected.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem must point into the expected window, else no MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
7609
493adb1f
WX
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* the link is gone for good - no point unloading/resetting */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
7643
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 *
 * Returns PCI_ERS_RESULT_RECOVERED when the device could be
 * re-enabled, PCI_ERS_RESULT_DISCONNECT otherwise.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* restore the config space saved at probe time */
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
7674
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* Parity-error recovery is still in progress - do not touch the
	 * device until it has finished (caller may retry later).
	 */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	/* Reload the NIC only if it was up when the error hit */
	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	/* Re-attach so the stack may use the device again */
	netif_device_attach(dev);

	rtnl_unlock();
}
7703
/* PCI Advanced Error Reporting (AER) recovery callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
7709
/* PCI driver glue: probe/remove, suspend/resume and AER error handling */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
7719
7720static int __init bnx2x_init(void)
7721{
dd21ca6d
SG
7722 int ret;
7723
7995c64e 7724 pr_info("%s", version);
938cf541 7725
1cf167f2
EG
7726 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7727 if (bnx2x_wq == NULL) {
7995c64e 7728 pr_err("Cannot create workqueue\n");
1cf167f2
EG
7729 return -ENOMEM;
7730 }
7731
dd21ca6d
SG
7732 ret = pci_register_driver(&bnx2x_pci_driver);
7733 if (ret) {
7995c64e 7734 pr_err("Cannot register driver\n");
dd21ca6d
SG
7735 destroy_workqueue(bnx2x_wq);
7736 }
7737 return ret;
a2fbb9ea
ET
7738}
7739
/* Module exit point: mirror of bnx2x_init() in reverse order. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	/* Destroyed only after the driver (and all its devices) is gone */
	destroy_workqueue(bnx2x_wq);
}
7746
/* Module entry/exit registration */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
7749
993ac7b5
MC
7750#ifdef BCM_CNIC
7751
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Retire 'count' completed CNIC slow-path entries */
	bp->cnic_spq_pending -= count;

	/* Refill the slow-path queue from the CNIC kwqe ring, up to the
	 * limit advertised in cnic_eth_dev.max_kwqe_pending.
	 */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		/* Nothing queued by CNIC - stop refilling */
		if (!bp->cnic_kwq_pending)
			break;

		/* Copy the next queued kwqe into the slow-path queue */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
7787
/* Accept up to 'count' kwqes from CNIC into the internal ring; returns
 * the number actually queued (may be less than 'count' if the ring fills),
 * or -EIO when the driver has paniced.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - report how many entries were taken */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Push to the SPQ now if there is still room for pending kwqes */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
7830
7831static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7832{
7833 struct cnic_ops *c_ops;
7834 int rc = 0;
7835
7836 mutex_lock(&bp->cnic_mutex);
7837 c_ops = bp->cnic_ops;
7838 if (c_ops)
7839 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7840 mutex_unlock(&bp->cnic_mutex);
7841
7842 return rc;
7843}
7844
7845static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7846{
7847 struct cnic_ops *c_ops;
7848 int rc = 0;
7849
7850 rcu_read_lock();
7851 c_ops = rcu_dereference(bp->cnic_ops);
7852 if (c_ops)
7853 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7854 rcu_read_unlock();
7855
7856 return rc;
7857}
7858
7859/*
7860 * for commands that have no data
7861 */
9f6c9258 7862int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
7863{
7864 struct cnic_ctl_info ctl = {0};
7865
7866 ctl.cmd = cmd;
7867
7868 return bnx2x_cnic_ctl_send(bp, &ctl);
7869}
7870
7871static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7872{
7873 struct cnic_ctl_info ctl;
7874
7875 /* first we tell CNIC and only then we count this as a completion */
7876 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7877 ctl.data.comp.cid = cid;
7878
7879 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7880 bnx2x_cnic_sp_post(bp, 1);
7881}
7882
/* Control hook exposed to the CNIC driver via cnic_eth_dev.drv_ctl;
 * dispatches CNIC requests to the appropriate driver service.
 * Returns 0 on success, -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* Write one CNIC context-table entry into the ILT */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* CNIC reports completed slow-path entries; retire and refill */
	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Include this client in the rx-mode configuration */
		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Exclude this client from the rx-mode configuration */
		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
7929
/* Describe to CNIC which IRQ vector and status blocks it should use,
 * depending on whether this function runs with MSI-X.
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* CNIC is given the second MSI-X vector of this function */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	/* irq_arr[0]: CNIC's own status block; irq_arr[1]: default SB */
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
7949
/* Attach the CNIC driver: allocate the kwqe ring, initialize the CNIC
 * status block and IRQ info, then publish @ops so slow-path events can
 * be delivered. Returns 0 on success or a negative errno.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): intr_sem != 0 appears to mean interrupts are not
	 * enabled yet (e.g. device down/reset) - confirm against the
	 * interrupt-handling code.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* One page of kwqes, used as a circular ring below */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: producer == consumer; 'last' marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	/* Opaque cookie passed back to CNIC on every callback */
	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish the ops pointer last, after all state is set up */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
7987
/* Detach the CNIC driver: clear the iSCSI MAC, unpublish cnic_ops and
 * free the kwqe ring once no RCU readers remain. Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait for RCU readers of cnic_ops before freeing the ring */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
8007
/* Fill in and return the cnic_eth_dev descriptor the CNIC driver binds
 * to: chip/PCI identity, BAR mappings, context-table geometry and the
 * callback table. Only these fields are (re)written; other state in the
 * embedded struct is managed by register/unregister.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	/* BAR mappings: register window and doorbell area */
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	/* Max kwqes allowed in-flight on the SPQ (see bnx2x_cnic_sp_post) */
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	/* Callbacks CNIC uses to drive this device */
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
8031
8032#endif /* BCM_CNIC */
94a78b79 8033