/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

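/*
 * Illustrative note (not from the original source): __stringify() turns
 * each numeric define into a string literal, and adjacent string literals
 * are pasted by the compiler.  Assuming hypothetical version defines such
 * as
 *
 *	#define BCM_5710_FW_MAJOR_VERSION	5
 *	#define BCM_5710_FW_MINOR_VERSION	2
 *	#define BCM_5710_FW_REVISION_VERSION	13
 *	#define BCM_5710_FW_ENGINEERING_VERSION	0
 *
 * FW_FILE_NAME_E1 would expand to the literal "bnx2x-e1-5.2.13.0.fw",
 * the name the driver later asks the firmware loader for.
 */
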
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
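
/*
 * Illustrative note (not from the original source): the two helpers above
 * implement the classic "indirect window" pattern.  PCICFG_GRC_ADDRESS is
 * a window register in PCI config space: software writes the target GRC
 * register address into it and then reads or writes the data through
 * PCICFG_GRC_DATA.  The final write of PCICFG_VENDOR_ID_OFFSET presumably
 * parks the window on a harmless offset so a stray config access cannot
 * touch a live register.  A hypothetical caller, purely for illustration:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, reg);	// read via config space
 *	bnx2x_reg_wr_ind(bp, reg, val | bit);	// read-modify-write
 *
 * This path is used when memory-mapped REG_RD/REG_WR access is not yet
 * available, e.g. very early at init.
 */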

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
197
ad8d3948
EG
198void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
199 u32 len32)
a2fbb9ea 200{
5ff7b6d4 201 struct dmae_command dmae;
a2fbb9ea 202 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
203 int cnt = 200;
204
205 if (!bp->dmae_ready) {
206 u32 *data = bnx2x_sp(bp, wb_data[0]);
207
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
209 " using indirect\n", dst_addr, len32);
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
211 return;
212 }
213
5ff7b6d4 214 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 215
5ff7b6d4
EG
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 219#ifdef __BIG_ENDIAN
5ff7b6d4 220 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 221#else
5ff7b6d4 222 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 223#endif
5ff7b6d4
EG
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226 dmae.src_addr_lo = U64_LO(dma_addr);
227 dmae.src_addr_hi = U64_HI(dma_addr);
228 dmae.dst_addr_lo = dst_addr >> 2;
229 dmae.dst_addr_hi = 0;
230 dmae.len = len32;
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 234
c3eefaf6 235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
237 "dst_addr [%x:%08x (%08x)]\n"
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
ad8d3948 242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 245
5ff7b6d4
EG
246 mutex_lock(&bp->dmae_mutex);
247
a2fbb9ea
ET
248 *wb_comp = 0;
249
5ff7b6d4 250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
251
252 udelay(5);
ad8d3948
EG
253
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
256
ad8d3948 257 if (!cnt) {
c3eefaf6 258 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
259 break;
260 }
ad8d3948 261 cnt--;
12469401
YG
262 /* adjust delay for emulation/FPGA */
263 if (CHIP_REV_IS_SLOW(bp))
264 msleep(100);
265 else
266 udelay(5);
a2fbb9ea 267 }
ad8d3948
EG
268
269 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
270}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
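
/*
 * Illustrative note (not from the original source): len is counted in
 * 32-bit words while offset advances in bytes, hence the "* 4".  Assuming,
 * purely for the sake of the example, that DMAE_LEN32_WR_MAX(bp) is 1024
 * dwords, a call with len = 1100 dwords would issue:
 *
 *	bnx2x_write_dmae(bp, phys_addr,        addr,        1024);
 *	bnx2x_write_dmae(bp, phys_addr + 4096, addr + 4096,   76);
 *
 * i.e. one maximal chunk followed by the 76-dword remainder, with both the
 * DMA source and the GRC destination advanced by 1024 * 4 bytes.
 */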

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
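
/*
 * Editorial note (not from the original source): bnx2x_trylock_hw_lock()
 * is declared bool, yet its out-of-range path returns -EINVAL.  Converted
 * to bool, -EINVAL is nonzero and therefore reads as "lock taken", so a
 * caller cannot distinguish the parameter error from success.  Callers
 * should only pass resource values up to HW_LOCK_MAX_RESOURCE_VALUE.
 */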


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

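/*
 * Illustrative note (not from the original source): the status word acked
 * above is a bitmap.  Bit 0 is the slowpath/attention indication handled
 * via bnx2x_wq; each fastpath status block sb_id owns bit (sb_id + 1),
 * which is what "mask = 0x2 << fp->sb_id" computes.  A hypothetical
 * status of 0x0005 would decode as:
 *
 *	0x0001	- slowpath event	-> queue_delayed_work(...)
 *	0x0004	- fastpath sb_id 1	-> napi_schedule(...)
 *
 * Any bits still set after all consumers ran are reported as an unknown
 * interrupt.
 */
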
/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
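
/*
 * Illustrative usage sketch (not from the original source): the HW lock is
 * a per-resource bit in a driver-control register shared by all functions
 * on the device, so every acquire must be paired with a release on all
 * paths.  A hypothetical caller protecting the GPIO resource:
 *
 *	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	if (rc)
 *		return rc;	// -EEXIST, -EINVAL or -EAGAIN
 *	...touch the shared resource...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */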

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

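/*
 * Illustrative arithmetic (not from the original source): at 10G,
 * line_speed = 10000 Mbps, so r_param = 10000 / 8 = 1250 (bytes per usec).
 * Taking RS_PERIODIC_TIMEOUT_USEC = 100 usec (25 SDM ticks of 4 usec, per
 * the comment above), the rate-shaper threshold becomes
 * 100 * 1250 * 5 / 4 = 156250 bytes: one period's worth of traffic padded
 * by the 1.25 safety factor.  The fairness resolution t_fair likewise
 * scales inversely with line speed, matching the "1000usec at 10G /
 * 10000usec at 1G" comment.
 */
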
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

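/*
 * Illustrative example (not from the original source): assuming the
 * configured MIN_BW field is a percentage of the port rate, the "* 100"
 * scaling means a vnic configured to 20% contributes 2000 to the sum.
 * With four vnics at 10%, 20%, 30% and 0%, the zero entry is bumped to
 * DEF_MIN_RATE, all_zero stays 0, and vn_weight_sum becomes the total
 * against which each vn's fairness credit delta is later normalized.
 */
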
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

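/*
 * Illustrative walk-through (not from the original source): the low bits
 * of the mailbox carry a sequence number so a stale firmware reply is
 * never mistaken for the current one.  Suppose, hypothetically, fw_seq
 * increments to 0x0042 and command is 0x10000000:
 *
 *	drv_mb_header <- 0x10000042		(command | seq)
 *	poll fw_mb_header until (rc & FW_MSG_SEQ_NUMBER_MASK) == 0x0042
 *	return rc & FW_MSG_CODE_MASK		(the firmware response code)
 *
 * If 5 seconds pass without the echoed sequence number, the function logs
 * "FW failed to respond!", dumps the firmware trace and returns 0.
 */
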
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

28912902
MC
/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}

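/*
 * Compare the default status block indices against the driver's cached
 * copies; update the cache and return a bitmask of which indices changed
 * (attention, cstorm, ustorm, xstorm, tstorm).
 */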
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

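/*
 * Handle newly asserted attention bits: mask them in the AEU, record them
 * in bp->attn_state, service the hard-wired sources (NIG/link, GPIOs,
 * general attentions) and then ack the bits towards the HC.
 */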
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
		   " the driver to shutdown the card to prevent permanent"
		   " damage. Please contact OEM Support for assistance\n");
}

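/*
 * The bnx2x_attn_int_deasserted0..3 helpers below each service one AEU
 * attention group: they clear the source at its block, log what happened,
 * and escalate fatal HW block attentions to bnx2x_panic().
 */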
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

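/*
 * Recovery state is kept in a MISC generic register that survives a
 * function reset: bits [15:0] hold a load counter (one count per loaded
 * function) and bit 16 is set while a global reset is in progress.
 */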
#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

2194
2195/*
2196 * should be run under rtnl lock
2197 */
9f6c9258 2198bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
2199{
2200 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2201 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2202 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2203}
2204
2205/*
2206 * should be run under rtnl lock
2207 */
9f6c9258 2208inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2209{
2210 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2211
2212 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2213
2214 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2215 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2216 barrier();
2217 mmiowb();
2218}
2219
2220/*
2221 * should be run under rtnl lock
2222 */
9f6c9258 2223u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
2224{
2225 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2226
2227 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2228
2229 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2230 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2231 barrier();
2232 mmiowb();
2233
2234 return val1;
2235}
2236
2237/*
2238 * should be run under rtnl lock
2239 */
2240static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2241{
2242 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2243}
2244
2245static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2246{
2247 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2248 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2249}
2250
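/*
 * Parity reporting helpers: walk an AEU signal word bit by bit and print
 * the name of each HW block that latched a parity error, returning the
 * running count so the output forms one comma-separated list.
 */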
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}

static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
				_print_next_block(par_num++, "BRB");
				break;
			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
				_print_next_block(par_num++, "PARSER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
				_print_next_block(par_num++, "TSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
				_print_next_block(par_num++, "SEARCHER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "TSEMI");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
						  "PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				_print_next_block(par_num++, "MCP ROM");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				_print_next_block(par_num++, "MCP UMP RX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				_print_next_block(par_num++, "MCP UMP TX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				_print_next_block(par_num++, "MCP SCPAD");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}

bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
				 attn.sig[3]);
}

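/*
 * Handle deasserted attention bits: check for parity errors first (and,
 * if found, kick off the recovery flow instead of normal handling), then
 * dispatch each deasserted group to the bnx2x_attn_int_deassertedN
 * helpers and unmask the bits again in the AEU.
 */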
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other functions can also "see" the parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

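/*
 * Slow path work item: determine which default status block indices
 * changed, service HW attentions and CStorm events accordingly, and
 * re-arm the default status block towards the IGU.
 */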
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

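/*
 * MSI-X slow path interrupt handler: disable further slow path interrupts
 * via the status block ack, let CNIC look at the event when it is built
 * in, and defer the real work to the sp_task work item above.
 */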
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

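/*
 * Periodic timer: in poll mode drive queue 0 by hand, exchange the
 * driver/MCP heartbeat pulse (warning when it falls out of step), and
 * trigger a statistics update while the device is open.
 */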
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

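/*
 * Per-queue status block setup: zero the CSTORM copies, point the USTORM
 * and CSTORM sections at the host DMA address, and start with all host
 * coalescing indices disabled (bnx2x_update_coalesce() programs them).
 */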
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
		   dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

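/*
 * Program the per-queue host coalescing timeouts from bp->rx_ticks and
 * bp->tx_ticks; a zero tick value disables coalescing on that index.
 */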
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

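/*
 * Fill the per-connection ETH context for every queue: the Rx side
 * (status block, buffer and SGE page addresses, optional TPA) and the Tx
 * side (Tx BD page address and statistics id).
 */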
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

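/*
 * Push the TSTORM per-client configuration (MTU, statistics counter and
 * VLAN removal flags) for every queue's client id.
 */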
void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

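/*
 * Translate bp->rx_mode (none/normal/allmulti/promisc) into the TSTORM
 * MAC filter configuration and the NIG LLH drop mask.
 */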
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

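/*
 * Per-function internal memory init: RSS/TPA configuration, rx mode,
 * per-client statistics reset, statistics collection addresses, CQE ring
 * mappings, dropless flow control thresholds and the rate shaping /
 * fairness (cmng) contexts.
 */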
3167static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 3168{
a2fbb9ea
ET
3169 struct tstorm_eth_function_common_config tstorm_config = {0};
3170 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
3171 int port = BP_PORT(bp);
3172 int func = BP_FUNC(bp);
de832a55
EG
3173 int i, j;
3174 u32 offset;
471de716 3175 u16 max_agg_size;
a2fbb9ea 3176
c68ed255
TH
3177 tstorm_config.config_flags = RSS_FLAGS(bp);
3178
3179 if (is_multi(bp))
a2fbb9ea 3180 tstorm_config.rss_result_mask = MULTI_MASK;
ca00392c
EG
3181
3182 /* Enable TPA if needed */
3183 if (bp->flags & TPA_ENABLE_FLAG)
3184 tstorm_config.config_flags |=
3185 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3186
8d9c5f34
EG
3187 if (IS_E1HMF(bp))
3188 tstorm_config.config_flags |=
3189 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 3190
34f80b04
EG
3191 tstorm_config.leading_client_id = BP_L_ID(bp);
3192
a2fbb9ea 3193 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3194 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
3195 (*(u32 *)&tstorm_config));
3196
c14423fe 3197 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 3198 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
3199 bnx2x_set_storm_rx_mode(bp);
3200
de832a55
EG
3201 for_each_queue(bp, i) {
3202 u8 cl_id = bp->fp[i].cl_id;
3203
3204 /* reset xstorm per client statistics */
3205 offset = BAR_XSTRORM_INTMEM +
3206 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3207 for (j = 0;
3208 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3209 REG_WR(bp, offset + j*4, 0);
3210
3211 /* reset tstorm per client statistics */
3212 offset = BAR_TSTRORM_INTMEM +
3213 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3214 for (j = 0;
3215 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3216 REG_WR(bp, offset + j*4, 0);
3217
3218 /* reset ustorm per client statistics */
3219 offset = BAR_USTRORM_INTMEM +
3220 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3221 for (j = 0;
3222 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3223 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
3224 }
3225
3226 /* Init statistics related context */
34f80b04 3227 stats_flags.collect_eth = 1;
a2fbb9ea 3228
66e855f3 3229 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3230 ((u32 *)&stats_flags)[0]);
66e855f3 3231 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
3232 ((u32 *)&stats_flags)[1]);
3233
66e855f3 3234 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3235 ((u32 *)&stats_flags)[0]);
66e855f3 3236 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
3237 ((u32 *)&stats_flags)[1]);
3238
de832a55
EG
3239 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3240 ((u32 *)&stats_flags)[0]);
3241 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3242 ((u32 *)&stats_flags)[1]);
3243
66e855f3 3244 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 3245 ((u32 *)&stats_flags)[0]);
66e855f3 3246 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
3247 ((u32 *)&stats_flags)[1]);
3248
66e855f3
YG
3249 REG_WR(bp, BAR_XSTRORM_INTMEM +
3250 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3251 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3252 REG_WR(bp, BAR_XSTRORM_INTMEM +
3253 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3254 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3255
3256 REG_WR(bp, BAR_TSTRORM_INTMEM +
3257 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3258 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3259 REG_WR(bp, BAR_TSTRORM_INTMEM +
3260 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3261 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 3262
de832a55
EG
3263 REG_WR(bp, BAR_USTRORM_INTMEM +
3264 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3265 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3266 REG_WR(bp, BAR_USTRORM_INTMEM +
3267 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3268 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3269
34f80b04
EG
3270 if (CHIP_IS_E1H(bp)) {
3271 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3272 IS_E1HMF(bp));
3273 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3274 IS_E1HMF(bp));
3275 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3276 IS_E1HMF(bp));
3277 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3278 IS_E1HMF(bp));
3279
7a9b2557
VZ
3280 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3281 bp->e1hov);
34f80b04
EG
3282 }
3283
4f40f2cb 3284 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
cdaa7cb8
VZ
3285 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3286 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
54b9ddaa 3287 for_each_queue(bp, i) {
7a9b2557 3288 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
3289
3290 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 3291 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
3292 U64_LO(fp->rx_comp_mapping));
3293 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 3294 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
3295 U64_HI(fp->rx_comp_mapping));
3296
ca00392c
EG
3297 /* Next page */
3298 REG_WR(bp, BAR_USTRORM_INTMEM +
3299 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3300 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3301 REG_WR(bp, BAR_USTRORM_INTMEM +
3302 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3303 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3304
7a9b2557 3305 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 3306 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
3307 max_agg_size);
3308 }
8a1c38d1 3309
1c06328c
EG
3310 /* dropless flow control */
3311 if (CHIP_IS_E1H(bp)) {
3312 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3313
3314 rx_pause.bd_thr_low = 250;
3315 rx_pause.cqe_thr_low = 250;
3316 rx_pause.cos = 1;
3317 rx_pause.sge_thr_low = 0;
3318 rx_pause.bd_thr_high = 350;
3319 rx_pause.cqe_thr_high = 350;
3320 rx_pause.sge_thr_high = 0;
3321
54b9ddaa 3322 for_each_queue(bp, i) {
1c06328c
EG
3323 struct bnx2x_fastpath *fp = &bp->fp[i];
3324
3325 if (!fp->disable_tpa) {
3326 rx_pause.sge_thr_low = 150;
3327 rx_pause.sge_thr_high = 250;
3328 }
3329
3330
3331 offset = BAR_USTRORM_INTMEM +
3332 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3333 fp->cl_id);
3334 for (j = 0;
3335 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3336 j++)
3337 REG_WR(bp, offset + j*4,
3338 ((u32 *)&rx_pause)[j]);
3339 }
3340 }
3341
8a1c38d1
EG
3342 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3343
3344 /* Init rate shaping and fairness contexts */
3345 if (IS_E1HMF(bp)) {
3346 int vn;
3347
 3348 /* During init there is no active link;
 3349 until link is up, set link rate to 10Gbps */
3350 bp->link_vars.line_speed = SPEED_10000;
3351 bnx2x_init_port_minmax(bp);
3352
b015e3d1
EG
3353 if (!BP_NOMCP(bp))
3354 bp->mf_config =
3355 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
3356 bnx2x_calc_vn_weight_sum(bp);
3357
3358 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3359 bnx2x_init_vn_minmax(bp, 2*vn + port);
3360
3361 /* Enable rate shaping and fairness */
b015e3d1 3362 bp->cmng.flags.cmng_enables |=
8a1c38d1 3363 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 3364
8a1c38d1
EG
3365 } else {
3366 /* rate shaping and fairness are disabled */
3367 DP(NETIF_MSG_IFUP,
3368 "single function mode minmax will be disabled\n");
3369 }
3370
3371
cdaa7cb8 3372 /* Store cmng structures to internal memory */
8a1c38d1
EG
3373 if (bp->port.pmf)
3374 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3375 REG_WR(bp, BAR_XSTRORM_INTMEM +
3376 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3377 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
3378}
3379
471de716
EG
3380static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3381{
3382 switch (load_code) {
3383 case FW_MSG_CODE_DRV_LOAD_COMMON:
3384 bnx2x_init_internal_common(bp);
3385 /* no break */
3386
3387 case FW_MSG_CODE_DRV_LOAD_PORT:
3388 bnx2x_init_internal_port(bp);
3389 /* no break */
3390
3391 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3392 bnx2x_init_internal_func(bp);
3393 break;
3394
3395 default:
3396 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3397 break;
3398 }
3399}
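/*
 * Editorial note: the missing 'break' statements above are deliberate
 * fall-throughs -- a COMMON load also runs the PORT and FUNCTION init
 * stages, a PORT load runs PORT + FUNCTION, and a FUNCTION load runs only
 * its own stage. bnx2x_init_hw() later in this file cascades the same way.
 */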
3400
9f6c9258 3401void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
3402{
3403 int i;
3404
3405 for_each_queue(bp, i) {
3406 struct bnx2x_fastpath *fp = &bp->fp[i];
3407
34f80b04 3408 fp->bp = bp;
a2fbb9ea 3409 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 3410 fp->index = i;
34f80b04 3411 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
3412#ifdef BCM_CNIC
3413 fp->sb_id = fp->cl_id + 1;
3414#else
34f80b04 3415 fp->sb_id = fp->cl_id;
37b091ba 3416#endif
34f80b04 3417 DP(NETIF_MSG_IFUP,
f5372251
EG
3418 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3419 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 3420 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 3421 fp->sb_id);
5c862848 3422 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
3423 }
3424
16119785
EG
3425 /* ensure status block indices were read */
3426 rmb();
3427
3428
5c862848
EG
3429 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3430 DEF_SB_ID);
3431 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
3432 bnx2x_update_coalesce(bp);
3433 bnx2x_init_rx_rings(bp);
3434 bnx2x_init_tx_ring(bp);
3435 bnx2x_init_sp_ring(bp);
3436 bnx2x_init_context(bp);
471de716 3437 bnx2x_init_internal(bp, load_code);
a2fbb9ea 3438 bnx2x_init_ind_table(bp);
0ef00459
EG
3439 bnx2x_stats_init(bp);
3440
3441 /* At this point, we are ready for interrupts */
3442 atomic_set(&bp->intr_sem, 0);
3443
3444 /* flush all before enabling interrupts */
3445 mb();
3446 mmiowb();
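	/* Editorial note: the mb() + mmiowb() pair above makes every prior
	 * status-block and register write globally visible before
	 * bnx2x_int_enable() lets the device raise interrupts, so an early
	 * ISR cannot observe half-initialized state. */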
3447
615f8fd9 3448 bnx2x_int_enable(bp);
eb8da205
EG
3449
3450 /* Check for SPIO5 */
3451 bnx2x_attn_int_deasserted0(bp,
3452 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3453 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
3454}
3455
3456/* end of nic init */
3457
3458/*
3459 * gzip service functions
3460 */
3461
3462static int bnx2x_gunzip_init(struct bnx2x *bp)
3463{
1a983142
FT
3464 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3465 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
3466 if (bp->gunzip_buf == NULL)
3467 goto gunzip_nomem1;
3468
3469 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3470 if (bp->strm == NULL)
3471 goto gunzip_nomem2;
3472
3473 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3474 GFP_KERNEL);
3475 if (bp->strm->workspace == NULL)
3476 goto gunzip_nomem3;
3477
3478 return 0;
3479
3480gunzip_nomem3:
3481 kfree(bp->strm);
3482 bp->strm = NULL;
3483
3484gunzip_nomem2:
1a983142
FT
3485 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3486 bp->gunzip_mapping);
a2fbb9ea
ET
3487 bp->gunzip_buf = NULL;
3488
3489gunzip_nomem1:
cdaa7cb8
VZ
3490 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3491 " un-compression\n");
a2fbb9ea
ET
3492 return -ENOMEM;
3493}
3494
3495static void bnx2x_gunzip_end(struct bnx2x *bp)
3496{
3497 kfree(bp->strm->workspace);
3498
3499 kfree(bp->strm);
3500 bp->strm = NULL;
3501
3502 if (bp->gunzip_buf) {
1a983142
FT
3503 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3504 bp->gunzip_mapping);
a2fbb9ea
ET
3505 bp->gunzip_buf = NULL;
3506 }
3507}
3508
94a78b79 3509static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
3510{
3511 int n, rc;
3512
3513 /* check gzip header */
94a78b79
VZ
3514 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3515 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 3516 return -EINVAL;
94a78b79 3517 }
a2fbb9ea
ET
3518
3519 n = 10;
3520
34f80b04 3521#define FNAME 0x8
a2fbb9ea
ET
3522
3523 if (zbuf[3] & FNAME)
3524 while ((zbuf[n++] != 0) && (n < len));
3525
94a78b79 3526 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
3527 bp->strm->avail_in = len - n;
3528 bp->strm->next_out = bp->gunzip_buf;
3529 bp->strm->avail_out = FW_BUF_SIZE;
3530
3531 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3532 if (rc != Z_OK)
3533 return rc;
3534
3535 rc = zlib_inflate(bp->strm, Z_FINISH);
3536 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
3537 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3538 bp->strm->msg);
a2fbb9ea
ET
3539
3540 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3541 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
3542 netdev_err(bp->dev, "Firmware decompression error:"
3543 " gunzip_outlen (%d) not aligned\n",
3544 bp->gunzip_outlen);
a2fbb9ea
ET
3545 bp->gunzip_outlen >>= 2;
3546
3547 zlib_inflateEnd(bp->strm);
3548
3549 if (rc == Z_STREAM_END)
3550 return 0;
3551
3552 return rc;
3553}
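/*
 * Editorial sketch (not part of the driver): bnx2x_gunzip() above skips the
 * fixed 10-byte RFC 1952 member header, optionally followed by a
 * NUL-terminated file name when FLG bit 3 (FNAME == 0x8) is set, and then
 * inflates the raw deflate payload -- hence the -MAX_WBITS window-bits
 * argument, which tells zlib there is no zlib wrapper to parse.
 */
#if 0 /* illustration only, assuming no FEXTRA/FCOMMENT fields */
struct gzip_member_hdr {
	u8 id1;			/* 0x1f */
	u8 id2;			/* 0x8b */
	u8 cm;			/* 8 == Z_DEFLATED */
	u8 flg;			/* bit 3 set -> file name follows */
	u8 mtime[4];
	u8 xfl;
	u8 os;
} __packed;			/* 10 bytes, matching the n = 10 skip */
#endif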
3554
3555/* nic load/unload */
3556
3557/*
34f80b04 3558 * General service functions
a2fbb9ea
ET
3559 */
3560
3561/* send a NIG loopback debug packet */
3562static void bnx2x_lb_pckt(struct bnx2x *bp)
3563{
a2fbb9ea 3564 u32 wb_write[3];
a2fbb9ea
ET
3565
3566 /* Ethernet source and destination addresses */
a2fbb9ea
ET
3567 wb_write[0] = 0x55555555;
3568 wb_write[1] = 0x55555555;
34f80b04 3569 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 3570 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3571
3572 /* NON-IP protocol */
a2fbb9ea
ET
3573 wb_write[0] = 0x09000000;
3574 wb_write[1] = 0x55555555;
34f80b04 3575 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 3576 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3577}
3578
3579/* Some of the internal memories
3580 * are not directly readable from the driver;
3581 * to test them we send debug packets.
3582 */
3583static int bnx2x_int_mem_test(struct bnx2x *bp)
3584{
3585 int factor;
3586 int count, i;
3587 u32 val = 0;
3588
ad8d3948 3589 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 3590 factor = 120;
ad8d3948
EG
3591 else if (CHIP_REV_IS_EMUL(bp))
3592 factor = 200;
3593 else
a2fbb9ea 3594 factor = 1;
a2fbb9ea
ET
3595
3596 DP(NETIF_MSG_HW, "start part1\n");
3597
3598 /* Disable inputs of parser neighbor blocks */
3599 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3600 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3601 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3602 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3603
3604 /* Write 0 to parser credits for CFC search request */
3605 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3606
3607 /* send Ethernet packet */
3608 bnx2x_lb_pckt(bp);
3609
 3610 /* TODO: do I reset the NIG statistic? */
3611 /* Wait until NIG register shows 1 packet of size 0x10 */
3612 count = 1000 * factor;
3613 while (count) {
34f80b04 3614
a2fbb9ea
ET
3615 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3616 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3617 if (val == 0x10)
3618 break;
3619
3620 msleep(10);
3621 count--;
3622 }
3623 if (val != 0x10) {
3624 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3625 return -1;
3626 }
3627
3628 /* Wait until PRS register shows 1 packet */
3629 count = 1000 * factor;
3630 while (count) {
3631 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
3632 if (val == 1)
3633 break;
3634
3635 msleep(10);
3636 count--;
3637 }
3638 if (val != 0x1) {
3639 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3640 return -2;
3641 }
3642
3643 /* Reset and init BRB, PRS */
34f80b04 3644 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 3645 msleep(50);
34f80b04 3646 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 3647 msleep(50);
94a78b79
VZ
3648 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3649 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
3650
3651 DP(NETIF_MSG_HW, "part2\n");
3652
3653 /* Disable inputs of parser neighbor blocks */
3654 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3655 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3656 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3657 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3658
3659 /* Write 0 to parser credits for CFC search request */
3660 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3661
3662 /* send 10 Ethernet packets */
3663 for (i = 0; i < 10; i++)
3664 bnx2x_lb_pckt(bp);
3665
3666 /* Wait until NIG register shows 10 + 1
3667 packets of size 11*0x10 = 0xb0 */
3668 count = 1000 * factor;
3669 while (count) {
34f80b04 3670
a2fbb9ea
ET
3671 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3672 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3673 if (val == 0xb0)
3674 break;
3675
3676 msleep(10);
3677 count--;
3678 }
3679 if (val != 0xb0) {
3680 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3681 return -3;
3682 }
3683
3684 /* Wait until PRS register shows 2 packets */
3685 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3686 if (val != 2)
3687 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3688
3689 /* Write 1 to parser credits for CFC search request */
3690 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3691
3692 /* Wait until PRS register shows 3 packets */
3693 msleep(10 * factor);
3694 /* Wait until NIG register shows 1 packet of size 0x10 */
3695 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3696 if (val != 3)
3697 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3698
3699 /* clear NIG EOP FIFO */
3700 for (i = 0; i < 11; i++)
3701 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3702 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3703 if (val != 1) {
3704 BNX2X_ERR("clear of NIG failed\n");
3705 return -4;
3706 }
3707
3708 /* Reset and init BRB, PRS, NIG */
3709 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3710 msleep(50);
3711 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3712 msleep(50);
94a78b79
VZ
3713 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3714 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 3715#ifndef BCM_CNIC
a2fbb9ea
ET
3716 /* set NIC mode */
3717 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3718#endif
3719
3720 /* Enable inputs of parser neighbor blocks */
3721 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3722 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3723 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 3724 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
3725
3726 DP(NETIF_MSG_HW, "done\n");
3727
3728 return 0; /* OK */
3729}
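/*
 * Editorial sketch: the test above repeats one polling idiom -- read a
 * counter register until it reaches an expected value, with the budget
 * stretched by 'factor' on FPGA/emulation targets. A hypothetical helper
 * capturing it (not part of the driver):
 */
#if 0
static u32 bnx2x_poll_reg(struct bnx2x *bp, u32 reg, u32 want, int factor)
{
	int count = 1000 * factor;
	u32 val = 0;

	while (count--) {
		val = REG_RD(bp, reg);
		if (val == want)
			break;
		msleep(10);
	}
	return val;		/* caller treats val != want as a timeout */
}
#endif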
3730
3731static void enable_blocks_attention(struct bnx2x *bp)
3732{
3733 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3734 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3735 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3736 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3737 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3738 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3739 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3740 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3741 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
3742/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3743/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3744 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3745 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3746 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
3747/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3748/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3749 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3750 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3751 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3752 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
3753/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3754/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3755 if (CHIP_REV_IS_FPGA(bp))
3756 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3757 else
3758 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
3759 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3760 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3761 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
3762/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3763/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3764 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3765 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
3766/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
 3767 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
a2fbb9ea
ET
3768}
3769
72fd0718
VZ
3770static const struct {
3771 u32 addr;
3772 u32 mask;
3773} bnx2x_parity_mask[] = {
3774 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3775 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3776 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3777 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3778 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3779 {QM_REG_QM_PRTY_MASK, 0x0},
3780 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3781 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3782 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3783 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3784 {CDU_REG_CDU_PRTY_MASK, 0x0},
3785 {CFC_REG_CFC_PRTY_MASK, 0x0},
3786 {DBG_REG_DBG_PRTY_MASK, 0x0},
3787 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3788 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3789 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3790 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3791 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3792 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3793 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3794 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3795 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3796 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3797 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3798 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3799 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3800 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3801 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3802};
3803
3804static void enable_blocks_parity(struct bnx2x *bp)
3805{
3806 int i, mask_arr_len =
3807 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3808
3809 for (i = 0; i < mask_arr_len; i++)
3810 REG_WR(bp, bnx2x_parity_mask[i].addr,
3811 bnx2x_parity_mask[i].mask);
3812}
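/*
 * Editorial note: the open-coded length computation above is what the
 * kernel's ARRAY_SIZE() macro from <linux/kernel.h> expands to; the loop
 * could equally be written as
 *
 *	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
 *		REG_WR(bp, bnx2x_parity_mask[i].addr,
 *		       bnx2x_parity_mask[i].mask);
 */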
3813
34f80b04 3814
81f75bbf
EG
3815static void bnx2x_reset_common(struct bnx2x *bp)
3816{
3817 /* reset_common */
3818 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3819 0xd3ffff7f);
3820 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3821}
3822
573f2035
EG
3823static void bnx2x_init_pxp(struct bnx2x *bp)
3824{
3825 u16 devctl;
3826 int r_order, w_order;
3827
3828 pci_read_config_word(bp->pdev,
3829 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3830 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3831 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3832 if (bp->mrrs == -1)
3833 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3834 else {
3835 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3836 r_order = bp->mrrs;
3837 }
3838
3839 bnx2x_init_pxp_arb(bp, r_order, w_order);
3840}
fd4ef40d
EG
3841
3842static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3843{
2145a920 3844 int is_required;
fd4ef40d 3845 u32 val;
2145a920 3846 int port;
fd4ef40d 3847
2145a920
VZ
3848 if (BP_NOMCP(bp))
3849 return;
3850
3851 is_required = 0;
fd4ef40d
EG
3852 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3853 SHARED_HW_CFG_FAN_FAILURE_MASK;
3854
3855 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3856 is_required = 1;
3857
3858 /*
3859 * The fan failure mechanism is usually related to the PHY type since
3860 * the power consumption of the board is affected by the PHY. Currently,
 3861 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3862 */
3863 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3864 for (port = PORT_0; port < PORT_MAX; port++) {
3865 u32 phy_type =
3866 SHMEM_RD(bp, dev_info.port_hw_config[port].
3867 external_phy_config) &
3868 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869 is_required |=
3870 ((phy_type ==
3871 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
3872 (phy_type ==
3873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
3874 (phy_type ==
3875 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3876 }
3877
3878 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3879
3880 if (is_required == 0)
3881 return;
3882
3883 /* Fan failure is indicated by SPIO 5 */
3884 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3885 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3886
3887 /* set to active low mode */
3888 val = REG_RD(bp, MISC_REG_SPIO_INT);
3889 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 3890 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
3891 REG_WR(bp, MISC_REG_SPIO_INT, val);
3892
3893 /* enable interrupt to signal the IGU */
3894 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3895 val |= (1 << MISC_REGISTERS_SPIO_5);
3896 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3897}
3898
34f80b04 3899static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 3900{
a2fbb9ea 3901 u32 val, i;
37b091ba
MC
3902#ifdef BCM_CNIC
3903 u32 wb_write[2];
3904#endif
a2fbb9ea 3905
34f80b04 3906 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 3907
81f75bbf 3908 bnx2x_reset_common(bp);
34f80b04
EG
3909 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3910 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 3911
94a78b79 3912 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
3913 if (CHIP_IS_E1H(bp))
3914 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 3915
34f80b04
EG
3916 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3917 msleep(30);
3918 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 3919
94a78b79 3920 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
3921 if (CHIP_IS_E1(bp)) {
3922 /* enable HW interrupt from PXP on USDM overflow
3923 bit 16 on INT_MASK_0 */
3924 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3925 }
a2fbb9ea 3926
94a78b79 3927 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 3928 bnx2x_init_pxp(bp);
a2fbb9ea
ET
3929
3930#ifdef __BIG_ENDIAN
34f80b04
EG
3931 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3932 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3933 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3934 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3935 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
3936 /* make sure this value is 0 */
3937 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
3938
3939/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3940 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3941 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3942 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3943 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
3944#endif
3945
34f80b04 3946 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 3947#ifdef BCM_CNIC
34f80b04
EG
3948 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3949 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3950 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
3951#endif
3952
34f80b04
EG
3953 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3954 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 3955
34f80b04
EG
 3956 /* let the HW do its magic ... */
3957 msleep(100);
3958 /* finish PXP init */
3959 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3960 if (val != 1) {
3961 BNX2X_ERR("PXP2 CFG failed\n");
3962 return -EBUSY;
3963 }
3964 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3965 if (val != 1) {
3966 BNX2X_ERR("PXP2 RD_INIT failed\n");
3967 return -EBUSY;
3968 }
a2fbb9ea 3969
34f80b04
EG
3970 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3971 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 3972
94a78b79 3973 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 3974
34f80b04
EG
3975 /* clean the DMAE memory */
3976 bp->dmae_ready = 1;
3977 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 3978
94a78b79
VZ
3979 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3980 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3981 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3982 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 3983
34f80b04
EG
3984 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3985 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3986 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3987 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3988
94a78b79 3989 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
3990
3991#ifdef BCM_CNIC
3992 wb_write[0] = 0;
3993 wb_write[1] = 0;
3994 for (i = 0; i < 64; i++) {
3995 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3996 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3997
3998 if (CHIP_IS_E1H(bp)) {
3999 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4000 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4001 wb_write, 2);
4002 }
4003 }
4004#endif
34f80b04
EG
4005 /* soft reset pulse */
4006 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4007 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 4008
37b091ba 4009#ifdef BCM_CNIC
94a78b79 4010 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 4011#endif
a2fbb9ea 4012
94a78b79 4013 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
4014 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4015 if (!CHIP_REV_IS_SLOW(bp)) {
4016 /* enable hw interrupt from doorbell Q */
4017 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4018 }
a2fbb9ea 4019
94a78b79
VZ
4020 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4021 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 4022 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 4023#ifndef BCM_CNIC
3196a88a
EG
4024 /* set NIC mode */
4025 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 4026#endif
34f80b04
EG
4027 if (CHIP_IS_E1H(bp))
4028 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 4029
94a78b79
VZ
4030 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4031 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4032 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4033 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 4034
ca00392c
EG
4035 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4036 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 4039
94a78b79
VZ
4040 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4041 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4042 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4043 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 4044
34f80b04
EG
4045 /* sync semi rtc */
4046 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4047 0x80000000);
4048 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4049 0x80000000);
a2fbb9ea 4050
94a78b79
VZ
4051 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4052 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 4054
34f80b04 4055 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
4056 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4057 REG_WR(bp, i, random32());
94a78b79 4058 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
4059#ifdef BCM_CNIC
4060 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4061 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4062 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4063 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4064 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4065 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4066 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4067 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4068 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4069 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4070#endif
34f80b04 4071 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 4072
34f80b04
EG
4073 if (sizeof(union cdu_context) != 1024)
4074 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
4075 dev_alert(&bp->pdev->dev, "please adjust the size "
4076 "of cdu_context(%ld)\n",
7995c64e 4077 (long)sizeof(union cdu_context));
a2fbb9ea 4078
94a78b79 4079 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
4080 val = (4 << 24) + (0 << 12) + 1024;
4081 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 4082
94a78b79 4083 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 4084 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
4085 /* enable context validation interrupt from CFC */
4086 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4087
4088 /* set the thresholds to prevent CFC/CDU race */
4089 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 4090
94a78b79
VZ
4091 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4092 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 4093
94a78b79 4094 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
4095 /* Reset PCIE errors for debug */
4096 REG_WR(bp, 0x2814, 0xffffffff);
4097 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 4098
94a78b79 4099 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 4100 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 4101 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 4102 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 4103
94a78b79 4104 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
4105 if (CHIP_IS_E1H(bp)) {
4106 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4107 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4108 }
4109
4110 if (CHIP_REV_IS_SLOW(bp))
4111 msleep(200);
4112
4113 /* finish CFC init */
4114 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4115 if (val != 1) {
4116 BNX2X_ERR("CFC LL_INIT failed\n");
4117 return -EBUSY;
4118 }
4119 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4120 if (val != 1) {
4121 BNX2X_ERR("CFC AC_INIT failed\n");
4122 return -EBUSY;
4123 }
4124 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4125 if (val != 1) {
4126 BNX2X_ERR("CFC CAM_INIT failed\n");
4127 return -EBUSY;
4128 }
4129 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 4130
34f80b04
EG
4131 /* read NIG statistic
4132 to see if this is our first up since powerup */
4133 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4134 val = *bnx2x_sp(bp, wb_data[0]);
4135
4136 /* do internal memory self test */
4137 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4138 BNX2X_ERR("internal mem self test failed\n");
4139 return -EBUSY;
4140 }
4141
35b19ba5 4142 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
4143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 4146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
4147 bp->port.need_hw_lock = 1;
4148 break;
4149
34f80b04
EG
4150 default:
4151 break;
4152 }
f1410647 4153
fd4ef40d
EG
4154 bnx2x_setup_fan_failure_detection(bp);
4155
34f80b04
EG
4156 /* clear PXP2 attentions */
4157 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 4158
34f80b04 4159 enable_blocks_attention(bp);
72fd0718
VZ
4160 if (CHIP_PARITY_SUPPORTED(bp))
4161 enable_blocks_parity(bp);
a2fbb9ea 4162
6bbca910
YR
4163 if (!BP_NOMCP(bp)) {
4164 bnx2x_acquire_phy_lock(bp);
4165 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4166 bnx2x_release_phy_lock(bp);
4167 } else
4168 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4169
34f80b04
EG
4170 return 0;
4171}
a2fbb9ea 4172
34f80b04
EG
4173static int bnx2x_init_port(struct bnx2x *bp)
4174{
4175 int port = BP_PORT(bp);
94a78b79 4176 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 4177 u32 low, high;
34f80b04 4178 u32 val;
a2fbb9ea 4179
cdaa7cb8 4180 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
4181
4182 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 4183
94a78b79 4184 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 4185 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
4186
4187 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4188 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4189 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 4190 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 4191
37b091ba
MC
4192#ifdef BCM_CNIC
4193 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 4194
94a78b79 4195 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
4196 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4197 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 4198#endif
cdaa7cb8 4199
94a78b79 4200 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 4201
94a78b79 4202 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
4203 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4204 /* no pause for emulation and FPGA */
4205 low = 0;
4206 high = 513;
4207 } else {
4208 if (IS_E1HMF(bp))
4209 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4210 else if (bp->dev->mtu > 4096) {
4211 if (bp->flags & ONE_PORT_FLAG)
4212 low = 160;
4213 else {
4214 val = bp->dev->mtu;
4215 /* (24*1024 + val*4)/256 */
4216 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4217 }
4218 } else
4219 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4220 high = low + 56; /* 14*1024/256 */
4221 }
4222 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4223 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
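/*
 * Editorial example: for a two-port (non-MF) board with MTU 9000 the code
 * above yields low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293;
 * per the (24*1024 + val*4)/256 comment the thresholds are counted in
 * 256-byte BRB blocks, i.e. about 24KB plus four MTUs of headroom.
 */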
4224
4225
94a78b79 4226 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 4227
94a78b79 4228 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 4229 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 4230 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 4231 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 4232
94a78b79
VZ
4233 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4234 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4235 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4236 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 4237
94a78b79 4238 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 4239 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 4240
94a78b79 4241 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
4242
4243 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 4244 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
4245
4246 /* update threshold */
34f80b04 4247 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 4248 /* update init credit */
34f80b04 4249 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
4250
4251 /* probe changes */
34f80b04 4252 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 4253 msleep(5);
34f80b04 4254 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 4255
37b091ba
MC
4256#ifdef BCM_CNIC
4257 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 4258#endif
94a78b79 4259 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 4260 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
4261
4262 if (CHIP_IS_E1(bp)) {
4263 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4264 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4265 }
94a78b79 4266 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 4267
94a78b79 4268 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
4269 /* init aeu_mask_attn_func_0/1:
4270 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4271 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4272 * bits 4-7 are used for "per vn group attention" */
4273 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4274 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4275
94a78b79 4276 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 4277 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 4278 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 4279 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 4280 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 4281
94a78b79 4282 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
4283
4284 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4285
4286 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
4287 /* 0x2 disable e1hov, 0x1 enable */
4288 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4289 (IS_E1HMF(bp) ? 0x1 : 0x2));
4290
1c06328c
EG
4291 {
4292 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4293 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4294 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4295 }
34f80b04
EG
4296 }
4297
94a78b79 4298 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 4299 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 4300
35b19ba5 4301 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
4302 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4303 {
4304 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4305
4306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4307 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4308
4309 /* The GPIO should be swapped if the swap register is
4310 set and active */
4311 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4312 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4313
4314 /* Select function upon port-swap configuration */
4315 if (port == 0) {
4316 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4317 aeu_gpio_mask = (swap_val && swap_override) ?
4318 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4319 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4320 } else {
4321 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4322 aeu_gpio_mask = (swap_val && swap_override) ?
4323 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4325 }
4326 val = REG_RD(bp, offset);
4327 /* add GPIO3 to group */
4328 val |= aeu_gpio_mask;
4329 REG_WR(bp, offset, val);
4330 }
4331 break;
4332
35b19ba5 4333 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 4334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 4335 /* add SPIO 5 to group 0 */
4d295db0
EG
4336 {
4337 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4338 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4339 val = REG_RD(bp, reg_addr);
f1410647 4340 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
4341 REG_WR(bp, reg_addr, val);
4342 }
f1410647
ET
4343 break;
4344
4345 default:
4346 break;
4347 }
4348
c18487ee 4349 bnx2x__link_reset(bp);
a2fbb9ea 4350
34f80b04
EG
4351 return 0;
4352}
4353
4354#define ILT_PER_FUNC (768/2)
4355#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4356/* the phys address is shifted right 12 bits and has a
4357 1=valid bit added as the 53rd bit;
4358 then, since this is a wide register(TM),
4359 we split it into two 32 bit writes
4360 */
4361#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4362#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4363#define PXP_ONE_ILT(x) (((x) << 10) | x)
4364#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4365
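/*
 * Editorial example: for a hypothetical DMA address 0x0000001234567000,
 * ONCHIP_ADDR1() gives (addr >> 12) & 0xffffffff = 0x01234567 (bits 43:12)
 * and ONCHIP_ADDR2() gives (1 << 20) | (addr >> 44) = 0x00100000
 * (the valid bit plus bits 63:44), which bnx2x_ilt_wr() below writes as
 * the two halves of the wide ILT register.
 */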
37b091ba
MC
4366#ifdef BCM_CNIC
4367#define CNIC_ILT_LINES 127
4368#define CNIC_CTX_PER_ILT 16
4369#else
34f80b04 4370#define CNIC_ILT_LINES 0
37b091ba 4371#endif
34f80b04
EG
4372
4373static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4374{
4375 int reg;
4376
4377 if (CHIP_IS_E1H(bp))
4378 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4379 else /* E1 */
4380 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4381
4382 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4383}
4384
4385static int bnx2x_init_func(struct bnx2x *bp)
4386{
4387 int port = BP_PORT(bp);
4388 int func = BP_FUNC(bp);
8badd27a 4389 u32 addr, val;
34f80b04
EG
4390 int i;
4391
cdaa7cb8 4392 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 4393
8badd27a
EG
4394 /* set MSI reconfigure capability */
4395 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4396 val = REG_RD(bp, addr);
4397 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4398 REG_WR(bp, addr, val);
4399
34f80b04
EG
4400 i = FUNC_ILT_BASE(func);
4401
4402 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4403 if (CHIP_IS_E1H(bp)) {
4404 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4405 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4406 } else /* E1 */
4407 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4408 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4409
37b091ba
MC
4410#ifdef BCM_CNIC
4411 i += 1 + CNIC_ILT_LINES;
4412 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4413 if (CHIP_IS_E1(bp))
4414 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4415 else {
4416 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4417 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4418 }
4419
4420 i++;
4421 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4422 if (CHIP_IS_E1(bp))
4423 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4424 else {
4425 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4426 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4427 }
4428
4429 i++;
4430 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4431 if (CHIP_IS_E1(bp))
4432 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4433 else {
4434 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4435 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4436 }
4437
4438 /* tell the searcher where the T2 table is */
4439 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4440
4441 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4442 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4443
4444 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4445 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4446 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4447
4448 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4449#endif
34f80b04
EG
4450
4451 if (CHIP_IS_E1H(bp)) {
573f2035
EG
4452 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4453 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4454 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4455 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4456 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4457 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4458 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4459 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4460 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
4461
4462 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4463 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4464 }
4465
4466 /* HC init per function */
4467 if (CHIP_IS_E1H(bp)) {
4468 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4469
4470 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4471 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4472 }
94a78b79 4473 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 4474
c14423fe 4475 /* Reset PCIE errors for debug */
a2fbb9ea
ET
4476 REG_WR(bp, 0x2114, 0xffffffff);
4477 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 4478
34f80b04
EG
4479 return 0;
4480}
4481
9f6c9258 4482int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04
EG
4483{
4484 int i, rc = 0;
a2fbb9ea 4485
34f80b04
EG
4486 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4487 BP_FUNC(bp), load_code);
a2fbb9ea 4488
34f80b04
EG
4489 bp->dmae_ready = 0;
4490 mutex_init(&bp->dmae_mutex);
54016b26
EG
4491 rc = bnx2x_gunzip_init(bp);
4492 if (rc)
4493 return rc;
a2fbb9ea 4494
34f80b04
EG
4495 switch (load_code) {
4496 case FW_MSG_CODE_DRV_LOAD_COMMON:
4497 rc = bnx2x_init_common(bp);
4498 if (rc)
4499 goto init_hw_err;
4500 /* no break */
4501
4502 case FW_MSG_CODE_DRV_LOAD_PORT:
4503 bp->dmae_ready = 1;
4504 rc = bnx2x_init_port(bp);
4505 if (rc)
4506 goto init_hw_err;
4507 /* no break */
4508
4509 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4510 bp->dmae_ready = 1;
4511 rc = bnx2x_init_func(bp);
4512 if (rc)
4513 goto init_hw_err;
4514 break;
4515
4516 default:
4517 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4518 break;
4519 }
4520
4521 if (!BP_NOMCP(bp)) {
4522 int func = BP_FUNC(bp);
a2fbb9ea
ET
4523
4524 bp->fw_drv_pulse_wr_seq =
34f80b04 4525 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 4526 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
4527 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4528 }
a2fbb9ea 4529
34f80b04
EG
4530 /* this needs to be done before gunzip end */
4531 bnx2x_zero_def_sb(bp);
4532 for_each_queue(bp, i)
4533 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
4534#ifdef BCM_CNIC
4535 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4536#endif
34f80b04
EG
4537
4538init_hw_err:
4539 bnx2x_gunzip_end(bp);
4540
4541 return rc;
a2fbb9ea
ET
4542}
4543
9f6c9258 4544void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
4545{
4546
4547#define BNX2X_PCI_FREE(x, y, size) \
4548 do { \
4549 if (x) { \
1a983142 4550 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
4551 x = NULL; \
4552 y = 0; \
4553 } \
4554 } while (0)
4555
4556#define BNX2X_FREE(x) \
4557 do { \
4558 if (x) { \
4559 vfree(x); \
4560 x = NULL; \
4561 } \
4562 } while (0)
4563
4564 int i;
4565
4566 /* fastpath */
555f6c78 4567 /* Common */
a2fbb9ea
ET
4568 for_each_queue(bp, i) {
4569
555f6c78 4570 /* status blocks */
a2fbb9ea
ET
4571 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4572 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 4573 sizeof(struct host_status_block));
555f6c78
EG
4574 }
4575 /* Rx */
54b9ddaa 4576 for_each_queue(bp, i) {
a2fbb9ea 4577
555f6c78 4578 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
4579 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4580 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4581 bnx2x_fp(bp, i, rx_desc_mapping),
4582 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4583
4584 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4585 bnx2x_fp(bp, i, rx_comp_mapping),
4586 sizeof(struct eth_fast_path_rx_cqe) *
4587 NUM_RCQ_BD);
a2fbb9ea 4588
7a9b2557 4589 /* SGE ring */
32626230 4590 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
4591 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4592 bnx2x_fp(bp, i, rx_sge_mapping),
4593 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4594 }
555f6c78 4595 /* Tx */
54b9ddaa 4596 for_each_queue(bp, i) {
555f6c78
EG
4597
4598 /* fastpath tx rings: tx_buf tx_desc */
4599 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4600 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4601 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 4602 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 4603 }
a2fbb9ea
ET
4604 /* end of fastpath */
4605
4606 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 4607 sizeof(struct host_def_status_block));
a2fbb9ea
ET
4608
4609 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 4610 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4611
37b091ba 4612#ifdef BCM_CNIC
a2fbb9ea
ET
4613 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4614 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4615 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4616 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
4617 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4618 sizeof(struct host_status_block));
a2fbb9ea 4619#endif
7a9b2557 4620 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
4621
4622#undef BNX2X_PCI_FREE
4623#undef BNX2X_FREE
4624}
4625
9f6c9258 4626int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea
ET
4627{
4628
4629#define BNX2X_PCI_ALLOC(x, y, size) \
4630 do { \
1a983142 4631 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
4632 if (x == NULL) \
4633 goto alloc_mem_err; \
4634 memset(x, 0, size); \
4635 } while (0)
a2fbb9ea 4636
9f6c9258
DK
4637#define BNX2X_ALLOC(x, size) \
4638 do { \
4639 x = vmalloc(size); \
4640 if (x == NULL) \
4641 goto alloc_mem_err; \
4642 memset(x, 0, size); \
4643 } while (0)
a2fbb9ea 4644
9f6c9258 4645 int i;
a2fbb9ea 4646
9f6c9258
DK
4647 /* fastpath */
4648 /* Common */
a2fbb9ea 4649 for_each_queue(bp, i) {
9f6c9258 4650 bnx2x_fp(bp, i, bp) = bp;
a2fbb9ea 4651
9f6c9258
DK
4652 /* status blocks */
4653 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4654 &bnx2x_fp(bp, i, status_blk_mapping),
4655 sizeof(struct host_status_block));
a2fbb9ea 4656 }
9f6c9258
DK
4657 /* Rx */
4658 for_each_queue(bp, i) {
a2fbb9ea 4659
9f6c9258
DK
4660 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4661 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4662 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4663 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4664 &bnx2x_fp(bp, i, rx_desc_mapping),
4665 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 4666
9f6c9258
DK
4667 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4668 &bnx2x_fp(bp, i, rx_comp_mapping),
4669 sizeof(struct eth_fast_path_rx_cqe) *
4670 NUM_RCQ_BD);
a2fbb9ea 4671
9f6c9258
DK
4672 /* SGE ring */
4673 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4674 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4675 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4676 &bnx2x_fp(bp, i, rx_sge_mapping),
4677 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4678 }
4679 /* Tx */
4680 for_each_queue(bp, i) {
8badd27a 4681
9f6c9258
DK
4682 /* fastpath tx rings: tx_buf tx_desc */
4683 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4684 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4685 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4686 &bnx2x_fp(bp, i, tx_desc_mapping),
4687 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 4688 }
9f6c9258 4689 /* end of fastpath */
8badd27a 4690
9f6c9258
DK
4691 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4692 sizeof(struct host_def_status_block));
8badd27a 4693
9f6c9258
DK
4694 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4695 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4696
9f6c9258
DK
4697#ifdef BCM_CNIC
4698 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
8badd27a 4699
9f6c9258
DK
 4700 /* allocate the searcher T2 table;
 4701 we allocate 1/4 of the alloc num for T2
 4702 (which is not entered into the ILT) */
4703 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
a2fbb9ea 4704
9f6c9258
DK
4705 /* Initialize T2 (for 1024 connections) */
4706 for (i = 0; i < 16*1024; i += 64)
4707 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
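	/* Editorial note: each 64-byte T2 entry ends with an 8-byte physical
	 * pointer (offset 56) to the next entry, so the loop above links the
	 * 16KB table into a singly-linked free list for the searcher; the
	 * last entry is left pointing just past the table. */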
a2fbb9ea 4708
9f6c9258
DK
4709 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4710 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
65abd74d 4711
9f6c9258
DK
4712 /* QM queues (128*MAX_CONN) */
4713 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
65abd74d 4714
9f6c9258
DK
4715 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4716 sizeof(struct host_status_block));
4717#endif
65abd74d 4718
9f6c9258
DK
4719 /* Slow path ring */
4720 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 4721
9f6c9258 4722 return 0;
e1510706 4723
9f6c9258
DK
4724alloc_mem_err:
4725 bnx2x_free_mem(bp);
4726 return -ENOMEM;
e1510706 4727
9f6c9258
DK
4728#undef BNX2X_PCI_ALLOC
4729#undef BNX2X_ALLOC
65abd74d
YG
4730}
4731
65abd74d 4732
a2fbb9ea
ET
4733/*
4734 * Init service functions
4735 */
4736
e665bfda
MC
4737/**
4738 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4739 *
4740 * @param bp driver descriptor
4741 * @param set set or clear an entry (1 or 0)
4742 * @param mac pointer to a buffer containing a MAC
4743 * @param cl_bit_vec bit vector of clients to register a MAC for
4744 * @param cam_offset offset in a CAM to use
4745 * @param with_bcast set broadcast MAC as well
4746 */
4747static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4748 u32 cl_bit_vec, u8 cam_offset,
4749 u8 with_bcast)
a2fbb9ea
ET
4750{
4751 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 4752 int port = BP_PORT(bp);
a2fbb9ea
ET
4753
4754 /* CAM allocation
4755 * unicasts 0-31:port0 32-63:port1
4756 * multicast 64-127:port0 128-191:port1
4757 */
e665bfda
MC
4758 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4759 config->hdr.offset = cam_offset;
4760 config->hdr.client_id = 0xff;
a2fbb9ea
ET
4761 config->hdr.reserved1 = 0;
4762
4763 /* primary MAC */
4764 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 4765 swab16(*(u16 *)&mac[0]);
a2fbb9ea 4766 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 4767 swab16(*(u16 *)&mac[2]);
a2fbb9ea 4768 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 4769 swab16(*(u16 *)&mac[4]);
34f80b04 4770 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
4771 if (set)
4772 config->config_table[0].target_table_entry.flags = 0;
4773 else
4774 CAM_INVALIDATE(config->config_table[0]);
ca00392c 4775 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 4776 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
4777 config->config_table[0].target_table_entry.vlan_id = 0;
4778
3101c2bc
YG
4779 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4780 (set ? "setting" : "clearing"),
a2fbb9ea
ET
4781 config->config_table[0].cam_entry.msb_mac_addr,
4782 config->config_table[0].cam_entry.middle_mac_addr,
4783 config->config_table[0].cam_entry.lsb_mac_addr);
4784
4785 /* broadcast */
e665bfda
MC
4786 if (with_bcast) {
4787 config->config_table[1].cam_entry.msb_mac_addr =
4788 cpu_to_le16(0xffff);
4789 config->config_table[1].cam_entry.middle_mac_addr =
4790 cpu_to_le16(0xffff);
4791 config->config_table[1].cam_entry.lsb_mac_addr =
4792 cpu_to_le16(0xffff);
4793 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4794 if (set)
4795 config->config_table[1].target_table_entry.flags =
4796 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4797 else
4798 CAM_INVALIDATE(config->config_table[1]);
4799 config->config_table[1].target_table_entry.clients_bit_vector =
4800 cpu_to_le32(cl_bit_vec);
4801 config->config_table[1].target_table_entry.vlan_id = 0;
4802 }
a2fbb9ea
ET
4803
4804 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4805 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4806 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4807}
4808
e665bfda
MC
4809/**
4810 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4811 *
4812 * @param bp driver descriptor
4813 * @param set set or clear an entry (1 or 0)
4814 * @param mac pointer to a buffer containing a MAC
4815 * @param cl_bit_vec bit vector of clients to register a MAC for
4816 * @param cam_offset offset in a CAM to use
4817 */
4818static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4819 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
4820{
4821 struct mac_configuration_cmd_e1h *config =
4822 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4823
8d9c5f34 4824 config->hdr.length = 1;
e665bfda
MC
4825 config->hdr.offset = cam_offset;
4826 config->hdr.client_id = 0xff;
34f80b04
EG
4827 config->hdr.reserved1 = 0;
4828
4829 /* primary MAC */
4830 config->config_table[0].msb_mac_addr =
e665bfda 4831 swab16(*(u16 *)&mac[0]);
34f80b04 4832 config->config_table[0].middle_mac_addr =
e665bfda 4833 swab16(*(u16 *)&mac[2]);
34f80b04 4834 config->config_table[0].lsb_mac_addr =
e665bfda 4835 swab16(*(u16 *)&mac[4]);
ca00392c 4836 config->config_table[0].clients_bit_vector =
e665bfda 4837 cpu_to_le32(cl_bit_vec);
34f80b04
EG
4838 config->config_table[0].vlan_id = 0;
4839 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
4840 if (set)
4841 config->config_table[0].flags = BP_PORT(bp);
4842 else
4843 config->config_table[0].flags =
4844 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 4845
e665bfda 4846 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 4847 (set ? "setting" : "clearing"),
34f80b04
EG
4848 config->config_table[0].msb_mac_addr,
4849 config->config_table[0].middle_mac_addr,
e665bfda 4850 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
4851
4852 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4853 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4854 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4855}
4856
a2fbb9ea
ET
4857static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4858 int *state_p, int poll)
4859{
4860 /* can take a while if any port is running */
8b3a0f0b 4861 int cnt = 5000;
a2fbb9ea 4862
c14423fe
ET
4863 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4864 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
4865
4866 might_sleep();
34f80b04 4867 while (cnt--) {
a2fbb9ea
ET
4868 if (poll) {
4869 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
4870 /* if index is different from 0
4871 * the reply for some commands will
3101c2bc 4872 * be on the non default queue
a2fbb9ea
ET
4873 */
4874 if (idx)
4875 bnx2x_rx_int(&bp->fp[idx], 10);
4876 }
a2fbb9ea 4877
3101c2bc 4878 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
4879 if (*state_p == state) {
4880#ifdef BNX2X_STOP_ON_ERROR
4881 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4882#endif
a2fbb9ea 4883 return 0;
8b3a0f0b 4884 }
a2fbb9ea 4885
a2fbb9ea 4886 msleep(1);
e3553b29
EG
4887
4888 if (bp->panic)
4889 return -EIO;
a2fbb9ea
ET
4890 }
4891
a2fbb9ea 4892 /* timeout! */
49d66772
ET
4893 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4894 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
4895#ifdef BNX2X_STOP_ON_ERROR
4896 bnx2x_panic();
4897#endif
a2fbb9ea 4898
49d66772 4899 return -EBUSY;
a2fbb9ea
ET
4900}
4901
9f6c9258 4902void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
e665bfda
MC
4903{
4904 bp->set_mac_pending++;
4905 smp_wmb();
4906
4907 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4908 (1 << bp->fp->cl_id), BP_FUNC(bp));
4909
4910 /* Wait for a completion */
4911 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4912}
4913
9f6c9258 4914void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
e665bfda
MC
4915{
4916 bp->set_mac_pending++;
4917 smp_wmb();
4918
4919 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4920 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4921 1);
4922
4923 /* Wait for a completion */
4924 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4925}
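/*
 * Editorial note: both helpers above follow the same completion pattern --
 * bump set_mac_pending, smp_wmb() so the slowpath completion handler sees
 * the new value, post the SET_MAC ramrod, then poll the counter back down
 * via bnx2x_wait_ramrod(). The poll flag is set only when clearing the MAC,
 * presumably because interrupts may already be disabled on the down path.
 */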
4926
993ac7b5
MC
4927#ifdef BCM_CNIC
4928/**
 4929 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 4930 * MAC(s). This function will wait until the ramrod completion
 4931 * returns.
4932 *
4933 * @param bp driver handle
4934 * @param set set or clear the CAM entry
4935 *
 4936 * @return 0 on success, -ENODEV if the ramrod doesn't return.
4937 */
9f6c9258 4938int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5
MC
4939{
4940 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4941
4942 bp->set_mac_pending++;
4943 smp_wmb();
4944
4945 /* Send a SET_MAC ramrod */
4946 if (CHIP_IS_E1(bp))
4947 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4948 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4949 1);
4950 else
4951 /* CAM allocation for E1H
4952 * unicasts: by func number
4953 * multicast: 20+FUNC*20, 20 each
4954 */
4955 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4956 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4957
4958 /* Wait for a completion when setting */
4959 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4960
4961 return 0;
4962}
4963#endif
4964
9f6c9258 4965int bnx2x_setup_leading(struct bnx2x *bp)
a2fbb9ea 4966{
34f80b04 4967 int rc;
a2fbb9ea 4968
c14423fe 4969 /* reset IGU state */
34f80b04 4970 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4971
4972 /* SETUP ramrod */
4973 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4974
34f80b04
EG
4975 /* Wait for completion */
4976 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 4977
34f80b04 4978 return rc;
a2fbb9ea
ET
4979}
4980
9f6c9258 4981int bnx2x_setup_multi(struct bnx2x *bp, int index)
a2fbb9ea 4982{
555f6c78
EG
4983 struct bnx2x_fastpath *fp = &bp->fp[index];
4984
a2fbb9ea 4985 /* reset IGU state */
555f6c78 4986 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 4987
228241eb 4988 /* SETUP ramrod */
555f6c78
EG
4989 fp->state = BNX2X_FP_STATE_OPENING;
4990 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4991 fp->cl_id, 0);
a2fbb9ea
ET
4992
4993 /* Wait for completion */
4994 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 4995 &(fp->state), 0);
a2fbb9ea
ET
4996}
4997
a2fbb9ea 4998
9f6c9258 4999void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 5000{
ca00392c
EG
5001
5002 switch (bp->multi_mode) {
5003 case ETH_RSS_MODE_DISABLED:
54b9ddaa 5004 bp->num_queues = 1;
ca00392c
EG
5005 break;
5006
5007 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
5008 if (num_queues)
5009 bp->num_queues = min_t(u32, num_queues,
5010 BNX2X_MAX_QUEUES(bp));
ca00392c 5011 else
54b9ddaa
VZ
5012 bp->num_queues = min_t(u32, num_online_cpus(),
5013 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
5014 break;
5015
5016
5017 default:
54b9ddaa 5018 bp->num_queues = 1;
9f6c9258
DK
5019 break;
5020 }
a2fbb9ea
ET
5021}
5022
9f6c9258
DK
5023
5024
a2fbb9ea
ET
5025static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5026{
555f6c78 5027 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
5028 int rc;
5029
c14423fe 5030 /* halt the connection */
555f6c78
EG
5031 fp->state = BNX2X_FP_STATE_HALTING;
5032 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 5033
34f80b04 5034 /* Wait for completion */
a2fbb9ea 5035 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 5036 &(fp->state), 1);
c14423fe 5037 if (rc) /* timeout */
a2fbb9ea
ET
5038 return rc;
5039
5040 /* delete cfc entry */
5041 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5042
34f80b04
EG
5043 /* Wait for completion */
5044 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 5045 &(fp->state), 1);
34f80b04 5046 return rc;
a2fbb9ea
ET
5047}
5048
da5a662a 5049static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 5050{
4781bfad 5051 __le16 dsb_sp_prod_idx;
c14423fe 5052 /* if the other port is handling traffic,
a2fbb9ea 5053 this can take a lot of time */
34f80b04
EG
5054 int cnt = 500;
5055 int rc;
a2fbb9ea
ET
5056
5057 might_sleep();
5058
5059 /* Send HALT ramrod */
5060 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 5061 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 5062
34f80b04
EG
5063 /* Wait for completion */
5064 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5065 &(bp->fp[0].state), 1);
5066 if (rc) /* timeout */
da5a662a 5067 return rc;
a2fbb9ea 5068
49d66772 5069 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 5070
228241eb 5071 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
5072 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5073
49d66772 5074 /* Wait for completion to arrive on the default status block;
a2fbb9ea
ET
5075 we are going to reset the chip anyway,
5076 so there is not much to do if this times out
5077 */
34f80b04 5078 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
5079 if (!cnt) {
5080 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5081 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5082 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5083#ifdef BNX2X_STOP_ON_ERROR
5084 bnx2x_panic();
5085#endif
36e552ab 5086 rc = -EBUSY;
34f80b04
EG
5087 break;
5088 }
5089 cnt--;
da5a662a 5090 msleep(1);
5650d9d4 5091 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
5092 }
5093 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5094 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
5095
5096 return rc;
a2fbb9ea
ET
5097}
5098
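[Editor's note] The loop above is a recurring pattern here: sample a producer index, post the ramrod, then poll until the index moves or a budget of 1ms sleeps runs out. A standalone sketch under those assumptions; read_idx/sleep_1ms stand in for the register read and msleep(1) and are not driver APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool wait_index_change(uint16_t (*read_idx)(void),
			      void (*sleep_1ms)(void), int budget)
{
	uint16_t before = read_idx();

	while (read_idx() == before) {
		if (budget-- <= 0)
			return false;	/* caller maps this to -EBUSY */
		sleep_1ms();
	}
	return true;
}

/* toy producer: the index advances on the third read */
static uint16_t calls;
static uint16_t toy_idx(void) { return calls++ < 3 ? 0 : 1; }
static void no_sleep(void) { }

int main(void)
{
	printf("%s\n", wait_index_change(toy_idx, no_sleep, 500) ?
	       "completed" : "timed out");
	return 0;
}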
34f80b04
EG
5099static void bnx2x_reset_func(struct bnx2x *bp)
5100{
5101 int port = BP_PORT(bp);
5102 int func = BP_FUNC(bp);
5103 int base, i;
5104
5105 /* Configure IGU */
5106 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5107 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5108
37b091ba
MC
5109#ifdef BCM_CNIC
5110 /* Disable Timer scan */
5111 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5112 /*
5113 * Wait for at least 10ms and up to 2 second for the timers scan to
5114 * complete
5115 */
5116 for (i = 0; i < 200; i++) {
5117 msleep(10);
5118 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5119 break;
5120 }
5121#endif
34f80b04
EG
5122 /* Clear ILT */
5123 base = FUNC_ILT_BASE(func);
5124 for (i = base; i < base + ILT_PER_FUNC; i++)
5125 bnx2x_ilt_wr(bp, i, 0);
5126}
5127
5128static void bnx2x_reset_port(struct bnx2x *bp)
5129{
5130 int port = BP_PORT(bp);
5131 u32 val;
5132
5133 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5134
5135 /* Do not rcv packets to BRB */
5136 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5137 /* Do not direct rcv packets that are not for MCP to the BRB */
5138 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5139 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5140
5141 /* Configure AEU */
5142 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5143
5144 msleep(100);
5145 /* Check for BRB port occupancy */
5146 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5147 if (val)
5148 DP(NETIF_MSG_IFDOWN,
33471629 5149 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
5150
5151 /* TODO: Close Doorbell port? */
5152}
5153
34f80b04
EG
5154static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5155{
5156 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5157 BP_FUNC(bp), reset_code);
5158
5159 switch (reset_code) {
5160 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5161 bnx2x_reset_port(bp);
5162 bnx2x_reset_func(bp);
5163 bnx2x_reset_common(bp);
5164 break;
5165
5166 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5167 bnx2x_reset_port(bp);
5168 bnx2x_reset_func(bp);
5169 break;
5170
5171 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5172 bnx2x_reset_func(bp);
5173 break;
49d66772 5174
34f80b04
EG
5175 default:
5176 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5177 break;
5178 }
5179}
5180
9f6c9258 5181void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 5182{
da5a662a 5183 int port = BP_PORT(bp);
a2fbb9ea 5184 u32 reset_code = 0;
da5a662a 5185 int i, cnt, rc;
a2fbb9ea 5186
555f6c78 5187 /* Wait until tx fastpath tasks complete */
54b9ddaa 5188 for_each_queue(bp, i) {
228241eb
ET
5189 struct bnx2x_fastpath *fp = &bp->fp[i];
5190
34f80b04 5191 cnt = 1000;
e8b5fc51 5192 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 5193
7961f791 5194 bnx2x_tx_int(fp);
34f80b04
EG
5195 if (!cnt) {
5196 BNX2X_ERR("timeout waiting for queue[%d]\n",
5197 i);
5198#ifdef BNX2X_STOP_ON_ERROR
5199 bnx2x_panic();
5200 return -EBUSY;
5201#else
5202 break;
5203#endif
5204 }
5205 cnt--;
da5a662a 5206 msleep(1);
34f80b04 5207 }
228241eb 5208 }
da5a662a
VZ
5209 /* Give HW time to discard old tx messages */
5210 msleep(1);
a2fbb9ea 5211
3101c2bc
YG
5212 if (CHIP_IS_E1(bp)) {
5213 struct mac_configuration_cmd *config =
5214 bnx2x_sp(bp, mcast_config);
5215
e665bfda 5216 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 5217
8d9c5f34 5218 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
5219 CAM_INVALIDATE(config->config_table[i]);
5220
8d9c5f34 5221 config->hdr.length = i;
3101c2bc
YG
5222 if (CHIP_REV_IS_SLOW(bp))
5223 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5224 else
5225 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 5226 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
5227 config->hdr.reserved1 = 0;
5228
e665bfda
MC
5229 bp->set_mac_pending++;
5230 smp_wmb();
5231
3101c2bc
YG
5232 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5233 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5234 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5235
5236 } else { /* E1H */
65abd74d
YG
5237 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5238
e665bfda 5239 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
5240
5241 for (i = 0; i < MC_HASH_SIZE; i++)
5242 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
5243
5244 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 5245 }
993ac7b5
MC
5246#ifdef BCM_CNIC
5247 /* Clear iSCSI L2 MAC */
5248 mutex_lock(&bp->cnic_mutex);
5249 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5250 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5251 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5252 }
5253 mutex_unlock(&bp->cnic_mutex);
5254#endif
3101c2bc 5255
65abd74d
YG
5256 if (unload_mode == UNLOAD_NORMAL)
5257 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5258
7d0446c2 5259 else if (bp->flags & NO_WOL_FLAG)
65abd74d 5260 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 5261
7d0446c2 5262 else if (bp->wol) {
65abd74d
YG
5263 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5264 u8 *mac_addr = bp->dev->dev_addr;
5265 u32 val;
5266 /* The mac address is written to entries 1-4 to
5267 preserve entry 0 which is used by the PMF */
5268 u8 entry = (BP_E1HVN(bp) + 1)*8;
5269
5270 val = (mac_addr[0] << 8) | mac_addr[1];
5271 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5272
5273 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5274 (mac_addr[4] << 8) | mac_addr[5];
5275 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5276
5277 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5278
5279 } else
5280 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5281
34f80b04
EG
5282 /* Close multi and leading connections;
5283 completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
5284 for_each_nondefault_queue(bp, i)
5285 if (bnx2x_stop_multi(bp, i))
228241eb 5286 goto unload_error;
a2fbb9ea 5287
da5a662a
VZ
5288 rc = bnx2x_stop_leading(bp);
5289 if (rc) {
34f80b04 5290 BNX2X_ERR("Stop leading failed!\n");
da5a662a 5291#ifdef BNX2X_STOP_ON_ERROR
34f80b04 5292 return -EBUSY;
da5a662a
VZ
5293#else
5294 goto unload_error;
34f80b04 5295#endif
228241eb
ET
5296 }
5297
5298unload_error:
34f80b04 5299 if (!BP_NOMCP(bp))
228241eb 5300 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 5301 else {
f5372251 5302 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
5303 load_count[0], load_count[1], load_count[2]);
5304 load_count[0]--;
da5a662a 5305 load_count[1 + port]--;
f5372251 5306 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
5307 load_count[0], load_count[1], load_count[2]);
5308 if (load_count[0] == 0)
5309 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 5310 else if (load_count[1 + port] == 0)
34f80b04
EG
5311 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5312 else
5313 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5314 }
a2fbb9ea 5315
34f80b04
EG
5316 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5317 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5318 bnx2x__link_reset(bp);
a2fbb9ea
ET
5319
5320 /* Reset the chip */
228241eb 5321 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
5322
5323 /* Report UNLOAD_DONE to MCP */
34f80b04 5324 if (!BP_NOMCP(bp))
a2fbb9ea 5325 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 5326
72fd0718
VZ
5327}
5328
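[Editor's note] The WoL block in bnx2x_chip_cleanup() packs the 6-byte MAC into two EMAC match register words. A standalone sketch of just that packing; the shifts mirror the code, the register names are left out.

#include <stdint.h>
#include <stdio.h>

static void pack_mac_match(const uint8_t mac[6], uint32_t *hi, uint32_t *lo)
{
	*hi = (mac[0] << 8) | mac[1];			/* bytes 0-1 */
	*lo = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
	      (mac[4] << 8) | mac[5];			/* bytes 2-5 */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint32_t hi, lo;

	pack_mac_match(mac, &hi, &lo);
	printf("hi=0x%04x lo=0x%08x\n", hi, lo); /* hi=0x0010 lo=0x18aabbcc */
	return 0;
}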
9f6c9258 5329void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
5330{
5331 u32 val;
5332
5333 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5334
5335 if (CHIP_IS_E1(bp)) {
5336 int port = BP_PORT(bp);
5337 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5338 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5339
5340 val = REG_RD(bp, addr);
5341 val &= ~(0x300);
5342 REG_WR(bp, addr, val);
5343 } else if (CHIP_IS_E1H(bp)) {
5344 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5345 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5346 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5347 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5348 }
5349}
5350
72fd0718
VZ
5351
5352/* Close gates #2, #3 and #4: */
5353static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5354{
5355 u32 val, addr;
5356
5357 /* Gates #2 and #4a are closed/opened for "not E1" only */
5358 if (!CHIP_IS_E1(bp)) {
5359 /* #4 */
5360 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5361 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5362 close ? (val | 0x1) : (val & (~(u32)1)));
5363 /* #2 */
5364 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5365 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5366 close ? (val | 0x1) : (val & (~(u32)1)));
5367 }
5368
5369 /* #3 */
5370 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5371 val = REG_RD(bp, addr);
5372 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5373
5374 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5375 close ? "closing" : "opening");
5376 mmiowb();
5377}
5378
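[Editor's note] There is a polarity difference hidden in bnx2x_set_234_gates(): gates #2 and #4 are evidently "discard" bits (bit 0 set to close), while gate #3 appears to be an enable/passthrough bit (bit 0 cleared to close), hence the inverted (!close) test. A tiny sketch of the two idioms:

#include <stdint.h>
#include <stdio.h>

static uint32_t discard_gate(uint32_t val, int close)	/* gates #2, #4 */
{
	return close ? (val | 0x1) : (val & ~(uint32_t)0x1);
}

static uint32_t enable_gate(uint32_t val, int close)	/* gate #3 */
{
	return !close ? (val | 0x1) : (val & ~(uint32_t)0x1);
}

int main(void)
{
	/* closing: the discard bit goes to 1, the enable bit goes to 0 */
	printf("0x%x 0x%x\n", discard_gate(0x0, 1), enable_gate(0x1, 1));
	return 0;
}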
5379#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5380
5381static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5382{
5383 /* Do some magic... */
5384 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5385 *magic_val = val & SHARED_MF_CLP_MAGIC;
5386 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5387}
5388
5389/* Restore the value of the `magic' bit.
5390 *
5391 * @param bp driver handle.
5392 * @param magic_val Old value of the `magic' bit.
5393 */
5394static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5395{
5396 /* Restore the `magic' bit value... */
5397 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5398 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5399 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5400 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5401 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5402 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5403}
5404
5405/* Prepares for MCP reset: takes care of CLP configurations.
5406 *
5407 * @param bp
5408 * @param magic_val Old value of 'magic' bit.
5409 */
5410static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5411{
5412 u32 shmem;
5413 u32 validity_offset;
5414
5415 DP(NETIF_MSG_HW, "Starting\n");
5416
5417 /* Set `magic' bit in order to save MF config */
5418 if (!CHIP_IS_E1(bp))
5419 bnx2x_clp_reset_prep(bp, magic_val);
5420
5421 /* Get shmem offset */
5422 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5423 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5424
5425 /* Clear validity map flags */
5426 if (shmem > 0)
5427 REG_WR(bp, shmem + validity_offset, 0);
5428}
5429
5430#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5431#define MCP_ONE_TIMEOUT 100 /* 100 ms */
5432
5433/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5434 * depending on the HW type.
5435 *
5436 * @param bp
5437 */
5438static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5439{
5440 /* special handling for emulation and FPGA,
5441 wait 10 times longer */
5442 if (CHIP_REV_IS_SLOW(bp))
5443 msleep(MCP_ONE_TIMEOUT*10);
5444 else
5445 msleep(MCP_ONE_TIMEOUT);
5446}
5447
5448static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5449{
5450 u32 shmem, cnt, validity_offset, val;
5451 int rc = 0;
5452
5453 msleep(100);
5454
5455 /* Get shmem offset */
5456 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5457 if (shmem == 0) {
5458 BNX2X_ERR("Shmem 0 return failure\n");
5459 rc = -ENOTTY;
5460 goto exit_lbl;
5461 }
5462
5463 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5464
5465 /* Wait for MCP to come up */
5466 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5467 /* TBD: it's best to check the validity map of the last port;
5468 * currently this checks port 0.
5469 */
5470 val = REG_RD(bp, shmem + validity_offset);
5471 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5472 shmem + validity_offset, val);
5473
5474 /* check that shared memory is valid. */
5475 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5476 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5477 break;
5478
5479 bnx2x_mcp_wait_one(bp);
5480 }
5481
5482 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5483
5484 /* Check that shared memory is valid. This indicates that MCP is up. */
5485 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5486 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5487 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5488 rc = -ENOTTY;
5489 goto exit_lbl;
5490 }
5491
5492exit_lbl:
5493 /* Restore the `magic' bit value */
5494 if (!CHIP_IS_E1(bp))
5495 bnx2x_clp_reset_done(bp, magic_val);
5496
5497 return rc;
5498}
5499
5500static void bnx2x_pxp_prep(struct bnx2x *bp)
5501{
5502 if (!CHIP_IS_E1(bp)) {
5503 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5504 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5505 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5506 mmiowb();
5507 }
5508}
5509
5510/*
5511 * Reset the whole chip except for:
5512 * - PCIE core
5513 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5514 * one reset bit)
5515 * - IGU
5516 * - MISC (including AEU)
5517 * - GRC
5518 * - RBCN, RBCP
5519 */
5520static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5521{
5522 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5523
5524 not_reset_mask1 =
5525 MISC_REGISTERS_RESET_REG_1_RST_HC |
5526 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5527 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5528
5529 not_reset_mask2 =
5530 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5531 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5532 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5533 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5534 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5535 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5536 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5537 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5538
5539 reset_mask1 = 0xffffffff;
5540
5541 if (CHIP_IS_E1(bp))
5542 reset_mask2 = 0xffff;
5543 else
5544 reset_mask2 = 0x1ffff;
5545
5546 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5547 reset_mask1 & (~not_reset_mask1));
5548 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5549 reset_mask2 & (~not_reset_mask2));
5550
5551 barrier();
5552 mmiowb();
5553
5554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5555 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5556 mmiowb();
5557}
5558
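[Editor's note] The mask arithmetic above reads as "reset everything except the listed blocks". A standalone sketch with placeholder bit positions; the real MISC_REGISTERS_RESET_REG_* values differ.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* placeholder positions for blocks that must survive the reset */
	uint32_t keep = (1u << 0) | (1u << 5) | (1u << 9);
	uint32_t reset_mask = 0xffffffffu;

	/* the _CLEAR write asserts reset only on blocks not in `keep` ... */
	printf("assert:   0x%08x\n", reset_mask & ~keep);
	/* ... and the later _SET write deasserts reset on everything */
	printf("deassert: 0x%08x\n", reset_mask);
	return 0;
}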
5559static int bnx2x_process_kill(struct bnx2x *bp)
5560{
5561 int cnt = 1000;
5562 u32 val = 0;
5563 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5564
5565
5566 /* Empty the Tetris buffer, wait for 1s */
5567 do {
5568 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5569 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5570 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5571 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5572 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5573 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5574 ((port_is_idle_0 & 0x1) == 0x1) &&
5575 ((port_is_idle_1 & 0x1) == 0x1) &&
5576 (pgl_exp_rom2 == 0xffffffff))
5577 break;
5578 msleep(1);
5579 } while (cnt-- > 0);
5580
5581 if (cnt <= 0) {
5582 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5583 " are still"
5584 " outstanding read requests after 1s!\n");
5585 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5586 " port_is_idle_0=0x%08x,"
5587 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5588 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5589 pgl_exp_rom2);
5590 return -EAGAIN;
5591 }
5592
5593 barrier();
5594
5595 /* Close gates #2, #3 and #4 */
5596 bnx2x_set_234_gates(bp, true);
5597
5598 /* TBD: Indicate that "process kill" is in progress to MCP */
5599
5600 /* Clear "unprepared" bit */
5601 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5602 barrier();
5603
5604 /* Make sure all is written to the chip before the reset */
5605 mmiowb();
5606
5607 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5608 * PSWHST, GRC and PSWRD Tetris buffer.
5609 */
5610 msleep(1);
5611
5612 /* Prepare for chip reset: */
5613 /* MCP */
5614 bnx2x_reset_mcp_prep(bp, &val);
5615
5616 /* PXP */
5617 bnx2x_pxp_prep(bp);
5618 barrier();
5619
5620 /* reset the chip */
5621 bnx2x_process_kill_chip_reset(bp);
5622 barrier();
5623
5624 /* Recover after reset: */
5625 /* MCP */
5626 if (bnx2x_reset_mcp_comp(bp, val))
5627 return -EAGAIN;
5628
5629 /* PXP */
5630 bnx2x_pxp_prep(bp);
5631
5632 /* Open the gates #2, #3 and #4 */
5633 bnx2x_set_234_gates(bp, false);
5634
5635 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5636 * reset state, re-enable attentions. */
5637
a2fbb9ea
ET
5638 return 0;
5639}
5640
72fd0718
VZ
5641static int bnx2x_leader_reset(struct bnx2x *bp)
5642{
5643 int rc = 0;
5644 /* Try to recover after the failure */
5645 if (bnx2x_process_kill(bp)) {
5646 printk(KERN_ERR "%s: Something bad had happened! Aii!\n",
5647 bp->dev->name);
5648 rc = -EAGAIN;
5649 goto exit_leader_reset;
5650 }
5651
5652 /* Clear "reset is in progress" bit and update the driver state */
5653 bnx2x_set_reset_done(bp);
5654 bp->recovery_state = BNX2X_RECOVERY_DONE;
5655
5656exit_leader_reset:
5657 bp->is_leader = 0;
5658 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5659 smp_wmb();
5660 return rc;
5661}
5662
72fd0718
VZ
5663/* Assumption: runs under rtnl lock. This together with the fact
5664 * that it's called only from bnx2x_reset_task() ensures that it
5665 * will never be called when netif_running(bp->dev) is false.
5666 */
5667static void bnx2x_parity_recover(struct bnx2x *bp)
5668{
5669 DP(NETIF_MSG_HW, "Handling parity\n");
5670 while (1) {
5671 switch (bp->recovery_state) {
5672 case BNX2X_RECOVERY_INIT:
5673 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5674 /* Try to get a LEADER_LOCK HW lock */
5675 if (bnx2x_trylock_hw_lock(bp,
5676 HW_LOCK_RESOURCE_RESERVED_08))
5677 bp->is_leader = 1;
5678
5679 /* Stop the driver */
5680 /* If interface has been removed - break */
5681 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5682 return;
5683
5684 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5685 /* Ensure "is_leader" and "recovery_state"
5686 * update values are seen on other CPUs
5687 */
5688 smp_wmb();
5689 break;
5690
5691 case BNX2X_RECOVERY_WAIT:
5692 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5693 if (bp->is_leader) {
5694 u32 load_counter = bnx2x_get_load_cnt(bp);
5695 if (load_counter) {
5696 /* Wait until all other functions get
5697 * down.
5698 */
5699 schedule_delayed_work(&bp->reset_task,
5700 HZ/10);
5701 return;
5702 } else {
5703 /* If all other functions got down -
5704 * try to bring the chip back to
5705 * normal. In any case it's an exit
5706 * point for a leader.
5707 */
5708 if (bnx2x_leader_reset(bp) ||
5709 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5710 printk(KERN_ERR"%s: Recovery "
5711 "has failed. Power cycle is "
5712 "needed.\n", bp->dev->name);
5713 /* Disconnect this device */
5714 netif_device_detach(bp->dev);
5715 /* Block ifup for all function
5716 * of this ASIC until
5717 * "process kill" or power
5718 * cycle.
5719 */
5720 bnx2x_set_reset_in_progress(bp);
5721 /* Shut down the power */
5722 bnx2x_set_power_state(bp,
5723 PCI_D3hot);
5724 return;
5725 }
5726
5727 return;
5728 }
5729 } else { /* non-leader */
5730 if (!bnx2x_reset_is_done(bp)) {
5731 /* Try to get a LEADER_LOCK HW lock,
5732 * since the former leader may have
5733 * been unloaded by the user or may
5734 * have released leadership for some
5735 * other reason.
5736 */
5737 if (bnx2x_trylock_hw_lock(bp,
5738 HW_LOCK_RESOURCE_RESERVED_08)) {
5739 /* I'm a leader now! Restart a
5740 * switch case.
5741 */
5742 bp->is_leader = 1;
5743 break;
5744 }
5745
5746 schedule_delayed_work(&bp->reset_task,
5747 HZ/10);
5748 return;
5749
5750 } else { /* A leader has completed
5751 * the "process kill". It's an exit
5752 * point for a non-leader.
5753 */
5754 bnx2x_nic_load(bp, LOAD_NORMAL);
5755 bp->recovery_state =
5756 BNX2X_RECOVERY_DONE;
5757 smp_wmb();
5758 return;
5759 }
5760 }
5761 default:
5762 return;
5763 }
5764 }
5765}
5766
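[Editor's note] For orientation, bnx2x_parity_recover() above is a small state machine driven by rescheduled work. Below is a heavily simplified standalone skeleton of the flow; all hooks are stubs, not driver calls.

#include <stdbool.h>
#include <stdio.h>

enum rstate { R_INIT, R_WAIT, R_DONE };

static bool try_leader_lock(void)  { return true; }	/* stub */
static int  other_load_count(void) { return 0; }	/* stub */

static enum rstate step(enum rstate s, bool *leader)
{
	switch (s) {
	case R_INIT:
		*leader = try_leader_lock();
		/* real code: unload the NIC here */
		return R_WAIT;
	case R_WAIT:
		if (*leader && other_load_count())
			return R_WAIT;	/* real code: reschedule the work */
		/* leader runs "process kill"; everyone reloads the NIC */
		return R_DONE;
	default:
		return R_DONE;
	}
}

int main(void)
{
	bool leader = false;
	enum rstate s = R_INIT;

	while (s != R_DONE)
		s = step(s, &leader);
	printf("recovered, leader=%d\n", leader);
	return 0;
}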
5767/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5768 * scheduled on a general queue in order to prevent a dead lock.
5769 */
34f80b04
EG
5770static void bnx2x_reset_task(struct work_struct *work)
5771{
72fd0718 5772 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
5773
5774#ifdef BNX2X_STOP_ON_ERROR
5775 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5776 " so reset not done to allow debug dump,\n"
72fd0718 5777 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
5778 return;
5779#endif
5780
5781 rtnl_lock();
5782
5783 if (!netif_running(bp->dev))
5784 goto reset_task_exit;
5785
72fd0718
VZ
5786 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5787 bnx2x_parity_recover(bp);
5788 else {
5789 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5790 bnx2x_nic_load(bp, LOAD_NORMAL);
5791 }
34f80b04
EG
5792
5793reset_task_exit:
5794 rtnl_unlock();
5795}
5796
a2fbb9ea
ET
5797/* end of nic load/unload */
5798
a2fbb9ea
ET
5799/*
5800 * Init service functions
5801 */
5802
f1ef27ef
EG
5803static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5804{
5805 switch (func) {
5806 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5807 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5808 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5809 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5810 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5811 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5812 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5813 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5814 default:
5815 BNX2X_ERR("Unsupported function index: %d\n", func);
5816 return (u32)(-1);
5817 }
5818}
5819
5820static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5821{
5822 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5823
5824 /* Flush all outstanding writes */
5825 mmiowb();
5826
5827 /* Pretend to be function 0 */
5828 REG_WR(bp, reg, 0);
5829 /* Flush the GRC transaction (in the chip) */
5830 new_val = REG_RD(bp, reg);
5831 if (new_val != 0) {
5832 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5833 new_val);
5834 BUG();
5835 }
5836
5837 /* From now we are in the "like-E1" mode */
5838 bnx2x_int_disable(bp);
5839
5840 /* Flush all outstanding writes */
5841 mmiowb();
5842
5843 /* Restore the original function settings */
5844 REG_WR(bp, reg, orig_func);
5845 new_val = REG_RD(bp, reg);
5846 if (new_val != orig_func) {
5847 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5848 orig_func, new_val);
5849 BUG();
5850 }
5851}
5852
5853static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5854{
5855 if (CHIP_IS_E1H(bp))
5856 bnx2x_undi_int_disable_e1h(bp, func);
5857 else
5858 bnx2x_int_disable(bp);
5859}
5860
34f80b04
EG
5861static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5862{
5863 u32 val;
5864
5865 /* Check if there is any driver already loaded */
5866 val = REG_RD(bp, MISC_REG_UNPREPARED);
5867 if (val == 0x1) {
5868 /* Check if it is the UNDI driver
5869 * UNDI driver initializes CID offset for normal bell to 0x7
5870 */
4a37fb66 5871 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5872 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5873 if (val == 0x7) {
5874 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5875 /* save our func */
34f80b04 5876 int func = BP_FUNC(bp);
da5a662a
VZ
5877 u32 swap_en;
5878 u32 swap_val;
34f80b04 5879
b4661739
EG
5880 /* clear the UNDI indication */
5881 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5882
34f80b04
EG
5883 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5884
5885 /* try unload UNDI on port 0 */
5886 bp->func = 0;
da5a662a
VZ
5887 bp->fw_seq =
5888 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5889 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 5890 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
5891
5892 /* if UNDI is loaded on the other port */
5893 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5894
da5a662a
VZ
5895 /* send "DONE" for previous unload */
5896 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5897
5898 /* unload UNDI on port 1 */
34f80b04 5899 bp->func = 1;
da5a662a
VZ
5900 bp->fw_seq =
5901 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5902 DRV_MSG_SEQ_NUMBER_MASK);
5903 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5904
5905 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
5906 }
5907
b4661739
EG
5908 /* now it's safe to release the lock */
5909 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5910
f1ef27ef 5911 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
5912
5913 /* close input traffic and wait for it */
5914 /* Do not rcv packets to BRB */
5915 REG_WR(bp,
5916 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5917 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5918 /* Do not direct rcv packets that are not for MCP to
5919 * the BRB */
5920 REG_WR(bp,
5921 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5922 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5923 /* clear AEU */
5924 REG_WR(bp,
5925 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5926 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5927 msleep(10);
5928
5929 /* save NIG port swap info */
5930 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5931 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
5932 /* reset device */
5933 REG_WR(bp,
5934 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 5935 0xd3ffffff);
34f80b04
EG
5936 REG_WR(bp,
5937 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5938 0x1403);
da5a662a
VZ
5939 /* take the NIG out of reset and restore swap values */
5940 REG_WR(bp,
5941 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5942 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5943 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5944 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5945
5946 /* send unload done to the MCP */
5947 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5948
5949 /* restore our func and fw_seq */
5950 bp->func = func;
5951 bp->fw_seq =
5952 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5953 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
5954
5955 } else
5956 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5957 }
5958}
5959
5960static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5961{
5962 u32 val, val2, val3, val4, id;
72ce58c3 5963 u16 pmc;
34f80b04
EG
5964
5965 /* Get the chip revision id and number. */
5966 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5967 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5968 id = ((val & 0xffff) << 16);
5969 val = REG_RD(bp, MISC_REG_CHIP_REV);
5970 id |= ((val & 0xf) << 12);
5971 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5972 id |= ((val & 0xff) << 4);
5a40e08e 5973 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
5974 id |= (val & 0xf);
5975 bp->common.chip_id = id;
5976 bp->link_params.chip_id = bp->common.chip_id;
5977 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5978
1c06328c
EG
5979 val = (REG_RD(bp, 0x2874) & 0x55);
5980 if ((bp->common.chip_id & 0x1) ||
5981 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5982 bp->flags |= ONE_PORT_FLAG;
5983 BNX2X_DEV_INFO("single port device\n");
5984 }
5985
34f80b04
EG
5986 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5987 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5988 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5989 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5990 bp->common.flash_size, bp->common.flash_size);
5991
5992 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 5993 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 5994 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
5995 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
5996 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
5997
5998 if (!bp->common.shmem_base ||
5999 (bp->common.shmem_base < 0xA0000) ||
6000 (bp->common.shmem_base >= 0xC0000)) {
6001 BNX2X_DEV_INFO("MCP not active\n");
6002 bp->flags |= NO_MCP_FLAG;
6003 return;
6004 }
6005
6006 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6007 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6008 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 6009 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
6010
6011 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 6012 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
6013
6014 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6015 SHARED_HW_CFG_LED_MODE_MASK) >>
6016 SHARED_HW_CFG_LED_MODE_SHIFT);
6017
c2c8b03e
EG
6018 bp->link_params.feature_config_flags = 0;
6019 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6020 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6021 bp->link_params.feature_config_flags |=
6022 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6023 else
6024 bp->link_params.feature_config_flags &=
6025 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6026
34f80b04
EG
6027 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6028 bp->common.bc_ver = val;
6029 BNX2X_DEV_INFO("bc_ver %X\n", val);
6030 if (val < BNX2X_BC_VER) {
6031 /* for now only warn;
6032 * later we might need to enforce this */
cdaa7cb8
VZ
6033 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6034 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 6035 }
4d295db0
EG
6036 bp->link_params.feature_config_flags |=
6037 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6038 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
6039
6040 if (BP_E1HVN(bp) == 0) {
6041 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6042 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6043 } else {
6044 /* no WOL capability for E1HVN != 0 */
6045 bp->flags |= NO_WOL_FLAG;
6046 }
6047 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 6048 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
6049
6050 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6051 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6052 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6053 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6054
cdaa7cb8
VZ
6055 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6056 val, val2, val3, val4);
34f80b04
EG
6057}
6058
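[Editor's note] The chip-id assembly at the top of bnx2x_get_common_hwinfo() follows the layout named in its comment (num:16-31, rev:12-15, metal:4-11, bond_id:0-3). A standalone sketch with a made-up part number:

#include <stdint.h>
#include <stdio.h>

static uint32_t make_chip_id(uint32_t num, uint32_t rev,
			     uint32_t metal, uint32_t bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}

int main(void)
{
	uint32_t id = make_chip_id(0x164e, 0x1, 0x00, 0x0); /* illustrative */

	printf("chip ID 0x%x: num 0x%x rev %u metal %u bond %u\n",
	       id, id >> 16, (id >> 12) & 0xf, (id >> 4) & 0xff, id & 0xf);
	return 0;
}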
6059static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6060 u32 switch_cfg)
a2fbb9ea 6061{
34f80b04 6062 int port = BP_PORT(bp);
a2fbb9ea
ET
6063 u32 ext_phy_type;
6064
a2fbb9ea
ET
6065 switch (switch_cfg) {
6066 case SWITCH_CFG_1G:
6067 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6068
c18487ee
YR
6069 ext_phy_type =
6070 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6071 switch (ext_phy_type) {
6072 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6073 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6074 ext_phy_type);
6075
34f80b04
EG
6076 bp->port.supported |= (SUPPORTED_10baseT_Half |
6077 SUPPORTED_10baseT_Full |
6078 SUPPORTED_100baseT_Half |
6079 SUPPORTED_100baseT_Full |
6080 SUPPORTED_1000baseT_Full |
6081 SUPPORTED_2500baseX_Full |
6082 SUPPORTED_TP |
6083 SUPPORTED_FIBRE |
6084 SUPPORTED_Autoneg |
6085 SUPPORTED_Pause |
6086 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6087 break;
6088
6089 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6090 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6091 ext_phy_type);
6092
34f80b04
EG
6093 bp->port.supported |= (SUPPORTED_10baseT_Half |
6094 SUPPORTED_10baseT_Full |
6095 SUPPORTED_100baseT_Half |
6096 SUPPORTED_100baseT_Full |
6097 SUPPORTED_1000baseT_Full |
6098 SUPPORTED_TP |
6099 SUPPORTED_FIBRE |
6100 SUPPORTED_Autoneg |
6101 SUPPORTED_Pause |
6102 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6103 break;
6104
6105 default:
6106 BNX2X_ERR("NVRAM config error. "
6107 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6108 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6109 return;
6110 }
6111
34f80b04
EG
6112 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6113 port*0x10);
6114 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6115 break;
6116
6117 case SWITCH_CFG_10G:
6118 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6119
c18487ee
YR
6120 ext_phy_type =
6121 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6122 switch (ext_phy_type) {
6123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6124 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6125 ext_phy_type);
6126
34f80b04
EG
6127 bp->port.supported |= (SUPPORTED_10baseT_Half |
6128 SUPPORTED_10baseT_Full |
6129 SUPPORTED_100baseT_Half |
6130 SUPPORTED_100baseT_Full |
6131 SUPPORTED_1000baseT_Full |
6132 SUPPORTED_2500baseX_Full |
6133 SUPPORTED_10000baseT_Full |
6134 SUPPORTED_TP |
6135 SUPPORTED_FIBRE |
6136 SUPPORTED_Autoneg |
6137 SUPPORTED_Pause |
6138 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6139 break;
6140
589abe3a
EG
6141 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6142 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 6143 ext_phy_type);
f1410647 6144
34f80b04 6145 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 6146 SUPPORTED_1000baseT_Full |
34f80b04 6147 SUPPORTED_FIBRE |
589abe3a 6148 SUPPORTED_Autoneg |
34f80b04
EG
6149 SUPPORTED_Pause |
6150 SUPPORTED_Asym_Pause);
f1410647
ET
6151 break;
6152
589abe3a
EG
6153 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6154 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
6155 ext_phy_type);
6156
34f80b04 6157 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 6158 SUPPORTED_2500baseX_Full |
34f80b04 6159 SUPPORTED_1000baseT_Full |
589abe3a
EG
6160 SUPPORTED_FIBRE |
6161 SUPPORTED_Autoneg |
6162 SUPPORTED_Pause |
6163 SUPPORTED_Asym_Pause);
6164 break;
6165
6166 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6167 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6168 ext_phy_type);
6169
6170 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
6171 SUPPORTED_FIBRE |
6172 SUPPORTED_Pause |
6173 SUPPORTED_Asym_Pause);
f1410647
ET
6174 break;
6175
589abe3a
EG
6176 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6177 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
6178 ext_phy_type);
6179
34f80b04
EG
6180 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6181 SUPPORTED_1000baseT_Full |
6182 SUPPORTED_FIBRE |
34f80b04
EG
6183 SUPPORTED_Pause |
6184 SUPPORTED_Asym_Pause);
f1410647
ET
6185 break;
6186
589abe3a
EG
6187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6188 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
6189 ext_phy_type);
6190
34f80b04 6191 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 6192 SUPPORTED_1000baseT_Full |
34f80b04 6193 SUPPORTED_Autoneg |
589abe3a 6194 SUPPORTED_FIBRE |
34f80b04
EG
6195 SUPPORTED_Pause |
6196 SUPPORTED_Asym_Pause);
c18487ee
YR
6197 break;
6198
4d295db0
EG
6199 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6200 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6201 ext_phy_type);
6202
6203 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6204 SUPPORTED_1000baseT_Full |
6205 SUPPORTED_Autoneg |
6206 SUPPORTED_FIBRE |
6207 SUPPORTED_Pause |
6208 SUPPORTED_Asym_Pause);
6209 break;
6210
f1410647
ET
6211 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6212 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6213 ext_phy_type);
6214
34f80b04
EG
6215 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6216 SUPPORTED_TP |
6217 SUPPORTED_Autoneg |
6218 SUPPORTED_Pause |
6219 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6220 break;
6221
28577185
EG
6222 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6223 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6224 ext_phy_type);
6225
6226 bp->port.supported |= (SUPPORTED_10baseT_Half |
6227 SUPPORTED_10baseT_Full |
6228 SUPPORTED_100baseT_Half |
6229 SUPPORTED_100baseT_Full |
6230 SUPPORTED_1000baseT_Full |
6231 SUPPORTED_10000baseT_Full |
6232 SUPPORTED_TP |
6233 SUPPORTED_Autoneg |
6234 SUPPORTED_Pause |
6235 SUPPORTED_Asym_Pause);
6236 break;
6237
c18487ee
YR
6238 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6239 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6240 bp->link_params.ext_phy_config);
6241 break;
6242
a2fbb9ea
ET
6243 default:
6244 BNX2X_ERR("NVRAM config error. "
6245 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 6246 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6247 return;
6248 }
6249
34f80b04
EG
6250 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6251 port*0x18);
6252 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 6253
a2fbb9ea
ET
6254 break;
6255
6256 default:
6257 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 6258 bp->port.link_config);
a2fbb9ea
ET
6259 return;
6260 }
34f80b04 6261 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
6262
6263 /* mask what we support according to speed_cap_mask */
c18487ee
YR
6264 if (!(bp->link_params.speed_cap_mask &
6265 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 6266 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 6267
c18487ee
YR
6268 if (!(bp->link_params.speed_cap_mask &
6269 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 6270 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 6271
c18487ee
YR
6272 if (!(bp->link_params.speed_cap_mask &
6273 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 6274 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 6275
c18487ee
YR
6276 if (!(bp->link_params.speed_cap_mask &
6277 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 6278 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 6279
c18487ee
YR
6280 if (!(bp->link_params.speed_cap_mask &
6281 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
6282 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6283 SUPPORTED_1000baseT_Full);
a2fbb9ea 6284
c18487ee
YR
6285 if (!(bp->link_params.speed_cap_mask &
6286 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 6287 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 6288
c18487ee
YR
6289 if (!(bp->link_params.speed_cap_mask &
6290 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 6291 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 6292
34f80b04 6293 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
6294}
6295
34f80b04 6296static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6297{
c18487ee 6298 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 6299
34f80b04 6300 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6301 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 6302 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 6303 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6304 bp->port.advertising = bp->port.supported;
a2fbb9ea 6305 } else {
c18487ee
YR
6306 u32 ext_phy_type =
6307 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6308
6309 if ((ext_phy_type ==
6310 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6311 (ext_phy_type ==
6312 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 6313 /* force 10G, no AN */
c18487ee 6314 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 6315 bp->port.advertising =
a2fbb9ea
ET
6316 (ADVERTISED_10000baseT_Full |
6317 ADVERTISED_FIBRE);
6318 break;
6319 }
6320 BNX2X_ERR("NVRAM config error. "
6321 "Invalid link_config 0x%x"
6322 " Autoneg not supported\n",
34f80b04 6323 bp->port.link_config);
a2fbb9ea
ET
6324 return;
6325 }
6326 break;
6327
6328 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 6329 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 6330 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
6331 bp->port.advertising = (ADVERTISED_10baseT_Full |
6332 ADVERTISED_TP);
a2fbb9ea 6333 } else {
cdaa7cb8
VZ
6334 BNX2X_ERROR("NVRAM config error. "
6335 "Invalid link_config 0x%x"
6336 " speed_cap_mask 0x%x\n",
6337 bp->port.link_config,
6338 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6339 return;
6340 }
6341 break;
6342
6343 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 6344 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
6345 bp->link_params.req_line_speed = SPEED_10;
6346 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6347 bp->port.advertising = (ADVERTISED_10baseT_Half |
6348 ADVERTISED_TP);
a2fbb9ea 6349 } else {
cdaa7cb8
VZ
6350 BNX2X_ERROR("NVRAM config error. "
6351 "Invalid link_config 0x%x"
6352 " speed_cap_mask 0x%x\n",
6353 bp->port.link_config,
6354 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6355 return;
6356 }
6357 break;
6358
6359 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 6360 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 6361 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
6362 bp->port.advertising = (ADVERTISED_100baseT_Full |
6363 ADVERTISED_TP);
a2fbb9ea 6364 } else {
cdaa7cb8
VZ
6365 BNX2X_ERROR("NVRAM config error. "
6366 "Invalid link_config 0x%x"
6367 " speed_cap_mask 0x%x\n",
6368 bp->port.link_config,
6369 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6370 return;
6371 }
6372 break;
6373
6374 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 6375 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
6376 bp->link_params.req_line_speed = SPEED_100;
6377 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6378 bp->port.advertising = (ADVERTISED_100baseT_Half |
6379 ADVERTISED_TP);
a2fbb9ea 6380 } else {
cdaa7cb8
VZ
6381 BNX2X_ERROR("NVRAM config error. "
6382 "Invalid link_config 0x%x"
6383 " speed_cap_mask 0x%x\n",
6384 bp->port.link_config,
6385 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6386 return;
6387 }
6388 break;
6389
6390 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 6391 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 6392 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
6393 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6394 ADVERTISED_TP);
a2fbb9ea 6395 } else {
cdaa7cb8
VZ
6396 BNX2X_ERROR("NVRAM config error. "
6397 "Invalid link_config 0x%x"
6398 " speed_cap_mask 0x%x\n",
6399 bp->port.link_config,
6400 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6401 return;
6402 }
6403 break;
6404
6405 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 6406 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 6407 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
6408 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6409 ADVERTISED_TP);
a2fbb9ea 6410 } else {
cdaa7cb8
VZ
6411 BNX2X_ERROR("NVRAM config error. "
6412 "Invalid link_config 0x%x"
6413 " speed_cap_mask 0x%x\n",
6414 bp->port.link_config,
6415 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6416 return;
6417 }
6418 break;
6419
6420 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6421 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6422 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 6423 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 6424 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
6425 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6426 ADVERTISED_FIBRE);
a2fbb9ea 6427 } else {
cdaa7cb8
VZ
6428 BNX2X_ERROR("NVRAM config error. "
6429 "Invalid link_config 0x%x"
6430 " speed_cap_mask 0x%x\n",
6431 bp->port.link_config,
6432 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6433 return;
6434 }
6435 break;
6436
6437 default:
cdaa7cb8
VZ
6438 BNX2X_ERROR("NVRAM config error. "
6439 "BAD link speed link_config 0x%x\n",
6440 bp->port.link_config);
c18487ee 6441 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6442 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
6443 break;
6444 }
a2fbb9ea 6445
34f80b04
EG
6446 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6447 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 6448 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 6449 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 6450 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 6451
c18487ee 6452 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 6453 " advertising 0x%x\n",
c18487ee
YR
6454 bp->link_params.req_line_speed,
6455 bp->link_params.req_duplex,
34f80b04 6456 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
6457}
6458
e665bfda
MC
6459static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6460{
6461 mac_hi = cpu_to_be16(mac_hi);
6462 mac_lo = cpu_to_be32(mac_lo);
6463 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6464 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6465}
6466
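[Editor's note] bnx2x_set_mac_buf() above reorders the two shmem words into wire order. A user-space sketch of the same packing, using htons/htonl in place of cpu_to_be16/cpu_to_be32:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htons/htonl as the user-space cpu_to_be*() */

static void set_mac_buf(uint8_t *mac, uint32_t lo, uint16_t hi)
{
	uint16_t be_hi = htons(hi);
	uint32_t be_lo = htonl(lo);

	memcpy(mac, &be_hi, sizeof(be_hi));
	memcpy(mac + sizeof(be_hi), &be_lo, sizeof(be_lo));
}

int main(void)
{
	uint8_t mac[6];

	set_mac_buf(mac, 0x18aabbcc, 0x0010);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* 00:10:18:aa:bb:cc */
	return 0;
}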
34f80b04 6467static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 6468{
34f80b04
EG
6469 int port = BP_PORT(bp);
6470 u32 val, val2;
589abe3a 6471 u32 config;
c2c8b03e 6472 u16 i;
01cd4528 6473 u32 ext_phy_type;
a2fbb9ea 6474
c18487ee 6475 bp->link_params.bp = bp;
34f80b04 6476 bp->link_params.port = port;
c18487ee 6477
c18487ee 6478 bp->link_params.lane_config =
a2fbb9ea 6479 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 6480 bp->link_params.ext_phy_config =
a2fbb9ea
ET
6481 SHMEM_RD(bp,
6482 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
6483 /* BCM8727_NOC => BCM8727 no over current */
6484 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6485 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6486 bp->link_params.ext_phy_config &=
6487 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6488 bp->link_params.ext_phy_config |=
6489 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6490 bp->link_params.feature_config_flags |=
6491 FEATURE_CONFIG_BCM8727_NOC;
6492 }
6493
c18487ee 6494 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
6495 SHMEM_RD(bp,
6496 dev_info.port_hw_config[port].speed_capability_mask);
6497
34f80b04 6498 bp->port.link_config =
a2fbb9ea
ET
6499 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6500
c2c8b03e
EG
6501 /* Get the 4 lanes xgxs config rx and tx */
6502 for (i = 0; i < 2; i++) {
6503 val = SHMEM_RD(bp,
6504 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6505 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6506 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6507
6508 val = SHMEM_RD(bp,
6509 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6510 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6511 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6512 }
6513
3ce2c3f9
EG
6514 /* If the device is capable of WoL, set the default state according
6515 * to the HW
6516 */
4d295db0 6517 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
6518 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6519 (config & PORT_FEATURE_WOL_ENABLED));
6520
c2c8b03e
EG
6521 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
6522 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
6523 bp->link_params.lane_config,
6524 bp->link_params.ext_phy_config,
34f80b04 6525 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 6526
4d295db0
EG
6527 bp->link_params.switch_cfg |= (bp->port.link_config &
6528 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 6529 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
6530
6531 bnx2x_link_settings_requested(bp);
6532
01cd4528
EG
6533 /*
6534 * If connected directly, work with the internal PHY, otherwise, work
6535 * with the external PHY
6536 */
6537 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6538 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6539 bp->mdio.prtad = bp->link_params.phy_addr;
6540
6541 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6542 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6543 bp->mdio.prtad =
659bc5c4 6544 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 6545
a2fbb9ea
ET
6546 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6547 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 6548 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
6549 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6550 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
6551
6552#ifdef BCM_CNIC
6553 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6554 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6555 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6556#endif
34f80b04
EG
6557}
6558
6559static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6560{
6561 int func = BP_FUNC(bp);
6562 u32 val, val2;
6563 int rc = 0;
a2fbb9ea 6564
34f80b04 6565 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 6566
34f80b04
EG
6567 bp->e1hov = 0;
6568 bp->e1hmf = 0;
2145a920 6569 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
6570 bp->mf_config =
6571 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 6572
2691d51d 6573 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 6574 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 6575 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 6576 bp->e1hmf = 1;
2691d51d
EG
6577 BNX2X_DEV_INFO("%s function mode\n",
6578 IS_E1HMF(bp) ? "multi" : "single");
6579
6580 if (IS_E1HMF(bp)) {
6581 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6582 e1hov_tag) &
6583 FUNC_MF_CFG_E1HOV_TAG_MASK);
6584 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6585 bp->e1hov = val;
6586 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6587 "(0x%04x)\n",
6588 func, bp->e1hov, bp->e1hov);
6589 } else {
cdaa7cb8
VZ
6590 BNX2X_ERROR("No valid E1HOV for func %d,"
6591 " aborting\n", func);
34f80b04
EG
6592 rc = -EPERM;
6593 }
2691d51d
EG
6594 } else {
6595 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
6596 BNX2X_ERROR("VN %d in single function mode,"
6597 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
6598 rc = -EPERM;
6599 }
34f80b04
EG
6600 }
6601 }
a2fbb9ea 6602
34f80b04
EG
6603 if (!BP_NOMCP(bp)) {
6604 bnx2x_get_port_hwinfo(bp);
6605
6606 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6607 DRV_MSG_SEQ_NUMBER_MASK);
6608 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6609 }
6610
6611 if (IS_E1HMF(bp)) {
6612 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6613 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6614 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6615 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6616 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6617 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6618 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6619 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6620 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6621 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6622 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6623 ETH_ALEN);
6624 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6625 ETH_ALEN);
a2fbb9ea 6626 }
34f80b04
EG
6627
6628 return rc;
a2fbb9ea
ET
6629 }
6630
34f80b04
EG
6631 if (BP_NOMCP(bp)) {
6632 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 6633 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
6634 random_ether_addr(bp->dev->dev_addr);
6635 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6636 }
a2fbb9ea 6637
34f80b04
EG
6638 return rc;
6639}
6640
34f24c7f
VZ
6641static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6642{
6643 int cnt, i, block_end, rodi;
6644 char vpd_data[BNX2X_VPD_LEN+1];
6645 char str_id_reg[VENDOR_ID_LEN+1];
6646 char str_id_cap[VENDOR_ID_LEN+1];
6647 u8 len;
6648
6649 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6650 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6651
6652 if (cnt < BNX2X_VPD_LEN)
6653 goto out_not_found;
6654
6655 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6656 PCI_VPD_LRDT_RO_DATA);
6657 if (i < 0)
6658 goto out_not_found;
6659
6660
6661 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6662 pci_vpd_lrdt_size(&vpd_data[i]);
6663
6664 i += PCI_VPD_LRDT_TAG_SIZE;
6665
6666 if (block_end > BNX2X_VPD_LEN)
6667 goto out_not_found;
6668
6669 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6670 PCI_VPD_RO_KEYWORD_MFR_ID);
6671 if (rodi < 0)
6672 goto out_not_found;
6673
6674 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6675
6676 if (len != VENDOR_ID_LEN)
6677 goto out_not_found;
6678
6679 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6680
6681 /* vendor specific info */
6682 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6683 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6684 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6685 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6686
6687 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6688 PCI_VPD_RO_KEYWORD_VENDOR0);
6689 if (rodi >= 0) {
6690 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6691
6692 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6693
6694 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6695 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6696 bp->fw_ver[len] = ' ';
6697 }
6698 }
6699 return;
6700 }
6701out_not_found:
6702 return;
6703}
6704
34f80b04
EG
6705static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6706{
6707 int func = BP_FUNC(bp);
87942b46 6708 int timer_interval;
34f80b04
EG
6709 int rc;
6710
da5a662a
VZ
6711 /* Disable interrupt handling until HW is initialized */
6712 atomic_set(&bp->intr_sem, 1);
e1510706 6713 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 6714
34f80b04 6715 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 6716 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
6717#ifdef BCM_CNIC
6718 mutex_init(&bp->cnic_mutex);
6719#endif
a2fbb9ea 6720
1cf167f2 6721 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 6722 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
6723
6724 rc = bnx2x_get_hwinfo(bp);
6725
34f24c7f 6726 bnx2x_read_fwinfo(bp);
34f80b04
EG
6727 /* need to reset chip if undi was active */
6728 if (!BP_NOMCP(bp))
6729 bnx2x_undi_unload(bp);
6730
6731 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 6732 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
6733
6734 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
6735 dev_err(&bp->pdev->dev, "MCP disabled, "
6736 "must load devices in order!\n");
34f80b04 6737
555f6c78 6738 /* Set multi queue mode */
8badd27a
EG
6739 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6740 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
6741 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6742 "requested is not MSI-X\n");
555f6c78
EG
6743 multi_mode = ETH_RSS_MODE_DISABLED;
6744 }
6745 bp->multi_mode = multi_mode;
5d7cd496 6746 bp->int_mode = int_mode;
555f6c78 6747
4fd89b7a
DK
6748 bp->dev->features |= NETIF_F_GRO;
6749
7a9b2557
VZ
6750 /* Set TPA flags */
6751 if (disable_tpa) {
6752 bp->flags &= ~TPA_ENABLE_FLAG;
6753 bp->dev->features &= ~NETIF_F_LRO;
6754 } else {
6755 bp->flags |= TPA_ENABLE_FLAG;
6756 bp->dev->features |= NETIF_F_LRO;
6757 }
5d7cd496 6758 bp->disable_tpa = disable_tpa;
7a9b2557 6759
a18f5128
EG
6760 if (CHIP_IS_E1(bp))
6761 bp->dropless_fc = 0;
6762 else
6763 bp->dropless_fc = dropless_fc;
6764
8d5726c4 6765 bp->mrrs = mrrs;
7a9b2557 6766
34f80b04
EG
6767 bp->tx_ring_size = MAX_TX_AVAIL;
6768 bp->rx_ring_size = MAX_RX_AVAIL;
6769
6770 bp->rx_csum = 1;
34f80b04 6771
7d323bfd
EG
6772 /* make sure that the numbers are in the right granularity */
6773 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6774 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 6775
87942b46
EG
6776 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6777 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
6778
6779 init_timer(&bp->timer);
6780 bp->timer.expires = jiffies + bp->current_interval;
6781 bp->timer.data = (unsigned long) bp;
6782 bp->timer.function = bnx2x_timer;
6783
6784 return rc;
a2fbb9ea
ET
6785}
6786
a2fbb9ea 6787
de0c62db
DK
6788/****************************************************************************
6789* General service functions
6790****************************************************************************/
a2fbb9ea 6791
bb2a0f7a 6792/* called with rtnl_lock */
a2fbb9ea
ET
6793static int bnx2x_open(struct net_device *dev)
6794{
6795 struct bnx2x *bp = netdev_priv(dev);
6796
6eccabb3
EG
6797 netif_carrier_off(dev);
6798
a2fbb9ea
ET
6799 bnx2x_set_power_state(bp, PCI_D0);
6800
72fd0718
VZ
6801 if (!bnx2x_reset_is_done(bp)) {
6802 do {
6803 /* Reset MCP mailbox sequence if there is an ongoing
6804 * recovery
6805 */
6806 bp->fw_seq = 0;
6807
6808 /* If this is the first function to load and "reset done"
6809 * is still not cleared, it may mean recovery is needed.
6810 * We don't check the attention state here because it may
6811 * have already been cleared by a "common" reset, but we
6812 * shall proceed with "process kill" anyway.
6813 */
6814 if ((bnx2x_get_load_cnt(bp) == 0) &&
6815 bnx2x_trylock_hw_lock(bp,
6816 HW_LOCK_RESOURCE_RESERVED_08) &&
6817 (!bnx2x_leader_reset(bp))) {
6818 DP(NETIF_MSG_HW, "Recovered in open\n");
6819 break;
6820 }
6821
6822 bnx2x_set_power_state(bp, PCI_D3hot);
6823
6824 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
6825 " completed yet. Try again later. If you still see this"
6826 " message after a few retries then a power cycle is"
6827 " required.\n", bp->dev->name);
6828
6829 return -EAGAIN;
6830 } while (0);
6831 }
6832
6833 bp->recovery_state = BNX2X_RECOVERY_DONE;
6834
bb2a0f7a 6835 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
6836}
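/* The do { ... } while (0) above serves as a breakable block: a
 * successful leader reset "break"s out and the load proceeds, while
 * the failure path powers down and returns -EAGAIN. A minimal
 * standalone form of the idiom, with try_recover() as a hypothetical
 * stand-in that returns 0 on success:
 *
 *	do {
 *		if (!try_recover())
 *			break;
 *		return -EAGAIN;
 *	} while (0);
 */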
6837
bb2a0f7a 6838/* called with rtnl_lock */
a2fbb9ea
ET
6839static int bnx2x_close(struct net_device *dev)
6840{
a2fbb9ea
ET
6841 struct bnx2x *bp = netdev_priv(dev);
6842
6843 /* Unload the driver, release IRQs */
bb2a0f7a 6844 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 6845 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
6846
6847 return 0;
6848}
6849
f5372251 6850/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 6851void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
6852{
6853 struct bnx2x *bp = netdev_priv(dev);
6854 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6855 int port = BP_PORT(bp);
6856
6857 if (bp->state != BNX2X_STATE_OPEN) {
6858 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6859 return;
6860 }
6861
6862 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6863
6864 if (dev->flags & IFF_PROMISC)
6865 rx_mode = BNX2X_RX_MODE_PROMISC;
6866
6867 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
6868 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6869 CHIP_IS_E1(bp)))
34f80b04
EG
6870 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6871
6872 else { /* some multicasts */
6873 if (CHIP_IS_E1(bp)) {
6874 int i, old, offset;
22bedad3 6875 struct netdev_hw_addr *ha;
34f80b04
EG
6876 struct mac_configuration_cmd *config =
6877 bnx2x_sp(bp, mcast_config);
6878
0ddf477b 6879 i = 0;
22bedad3 6880 netdev_for_each_mc_addr(ha, dev) {
34f80b04
EG
6881 config->config_table[i].
6882 cam_entry.msb_mac_addr =
22bedad3 6883 swab16(*(u16 *)&ha->addr[0]);
34f80b04
EG
6884 config->config_table[i].
6885 cam_entry.middle_mac_addr =
22bedad3 6886 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
6887 config->config_table[i].
6888 cam_entry.lsb_mac_addr =
22bedad3 6889 swab16(*(u16 *)&ha->addr[4]);
34f80b04
EG
6890 config->config_table[i].cam_entry.flags =
6891 cpu_to_le16(port);
6892 config->config_table[i].
6893 target_table_entry.flags = 0;
ca00392c
EG
6894 config->config_table[i].target_table_entry.
6895 clients_bit_vector =
6896 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
6897 config->config_table[i].
6898 target_table_entry.vlan_id = 0;
6899
6900 DP(NETIF_MSG_IFUP,
6901 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6902 config->config_table[i].
6903 cam_entry.msb_mac_addr,
6904 config->config_table[i].
6905 cam_entry.middle_mac_addr,
6906 config->config_table[i].
6907 cam_entry.lsb_mac_addr);
0ddf477b 6908 i++;
34f80b04 6909 }
8d9c5f34 6910 old = config->hdr.length;
34f80b04
EG
6911 if (old > i) {
6912 for (; i < old; i++) {
6913 if (CAM_IS_INVALID(config->
6914 config_table[i])) {
af246401 6915 /* already invalidated */
34f80b04
EG
6916 break;
6917 }
6918 /* invalidate */
6919 CAM_INVALIDATE(config->
6920 config_table[i]);
6921 }
6922 }
6923
6924 if (CHIP_REV_IS_SLOW(bp))
6925 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6926 else
6927 offset = BNX2X_MAX_MULTICAST*(1 + port);
6928
8d9c5f34 6929 config->hdr.length = i;
34f80b04 6930 config->hdr.offset = offset;
8d9c5f34 6931 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6932 config->hdr.reserved1 = 0;
6933
e665bfda
MC
6934 bp->set_mac_pending++;
6935 smp_wmb();
6936
34f80b04
EG
6937 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6938 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6939 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6940 0);
6941 } else { /* E1H */
6942 /* Accept one or more multicasts */
22bedad3 6943 struct netdev_hw_addr *ha;
34f80b04
EG
6944 u32 mc_filter[MC_HASH_SIZE];
6945 u32 crc, bit, regidx;
6946 int i;
6947
6948 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6949
22bedad3 6950 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 6951 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 6952 ha->addr);
34f80b04 6953
22bedad3 6954 crc = crc32c_le(0, ha->addr, ETH_ALEN);
34f80b04
EG
6955 bit = (crc >> 24) & 0xff;
6956 regidx = bit >> 5;
6957 bit &= 0x1f;
6958 mc_filter[regidx] |= (1 << bit);
6959 }
6960
6961 for (i = 0; i < MC_HASH_SIZE; i++)
6962 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6963 mc_filter[i]);
6964 }
6965 }
6966
6967 bp->rx_mode = rx_mode;
6968 bnx2x_set_storm_rx_mode(bp);
6969}
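/* On E1H the multicast filter is a 256-bit hash: bits 31..24 of the
 * little-endian CRC32C of each MAC select one bit across the
 * MC_HASH_SIZE (8) 32-bit registers. The same arithmetic as a
 * standalone helper (hypothetical, not driver code):
 *
 *	static void mc_hash_pos(u32 crc, int *regidx, int *bitpos)
 *	{
 *		int b = (crc >> 24) & 0xff;
 *		*regidx = b >> 5;
 *		*bitpos = b & 0x1f;
 *	}
 *
 * so b picks one of 256 filter bits, regidx the register (0..7) and
 * bitpos the bit within it (0..31).
 */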
6970
a2fbb9ea 6971
c18487ee 6972/* called with rtnl_lock */
01cd4528
EG
6973static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6974 int devad, u16 addr)
a2fbb9ea 6975{
01cd4528
EG
6976 struct bnx2x *bp = netdev_priv(netdev);
6977 u16 value;
6978 int rc;
6979 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 6980
01cd4528
EG
6981 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6982 prtad, devad, addr);
a2fbb9ea 6983
01cd4528
EG
6984 if (prtad != bp->mdio.prtad) {
6985 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6986 prtad, bp->mdio.prtad);
6987 return -EINVAL;
6988 }
6989
6990 /* The HW expects different devad if CL22 is used */
6991 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 6992
01cd4528
EG
6993 bnx2x_acquire_phy_lock(bp);
6994 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
6995 devad, addr, &value);
6996 bnx2x_release_phy_lock(bp);
6997 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 6998
01cd4528
EG
6999 if (!rc)
7000 rc = value;
7001 return rc;
7002}
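/* MDIO_DEVAD_NONE marks a clause-22 style access arriving through the
 * clause-45 mdio API; the hardware still needs a device address, so
 * both accessors remap it to DEFAULT_PHY_DEV_ADDR before issuing the
 * CL45 transaction. */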
a2fbb9ea 7003
01cd4528
EG
7004/* called with rtnl_lock */
7005static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7006 u16 addr, u16 value)
7007{
7008 struct bnx2x *bp = netdev_priv(netdev);
7009 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7010 int rc;
7011
7012 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7013 " value 0x%x\n", prtad, devad, addr, value);
7014
7015 if (prtad != bp->mdio.prtad) {
7016 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7017 prtad, bp->mdio.prtad);
7018 return -EINVAL;
a2fbb9ea
ET
7019 }
7020
01cd4528
EG
7021 /* The HW expects different devad if CL22 is used */
7022 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 7023
01cd4528
EG
7024 bnx2x_acquire_phy_lock(bp);
7025 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7026 devad, addr, value);
7027 bnx2x_release_phy_lock(bp);
7028 return rc;
7029}
c18487ee 7030
01cd4528
EG
7031/* called with rtnl_lock */
7032static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7033{
7034 struct bnx2x *bp = netdev_priv(dev);
7035 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 7036
01cd4528
EG
7037 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7038 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 7039
01cd4528
EG
7040 if (!netif_running(dev))
7041 return -EAGAIN;
7042
7043 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
7044}
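/* User space reaches the accessors above through the standard MII
 * ioctls; a hedged sketch of a caller (illustrative only, error
 * handling omitted):
 *
 *	struct ifreq ifr = { };
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	mii->phy_id = prtad;
 *	mii->reg_num = reg;
 *	ioctl(fd, SIOCGMIIREG, &ifr);
 *
 * mdio_mii_ioctl() decodes phy_id/reg_num into prtad/devad/addr and
 * dispatches to bnx2x_mdio_read()/bnx2x_mdio_write(); the result is
 * returned in mii->val_out.
 */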
7045
257ddbda 7046#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
7047static void poll_bnx2x(struct net_device *dev)
7048{
7049 struct bnx2x *bp = netdev_priv(dev);
7050
7051 disable_irq(bp->pdev->irq);
7052 bnx2x_interrupt(bp->pdev->irq, dev);
7053 enable_irq(bp->pdev->irq);
7054}
7055#endif
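/* poll_bnx2x() backs netpoll clients such as netconsole: with the
 * device IRQ masked it invokes the interrupt handler synchronously,
 * so packets can still be drained when normal interrupt delivery is
 * unavailable (e.g. while printing an oops). */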
7056
c64213cd
SH
7057static const struct net_device_ops bnx2x_netdev_ops = {
7058 .ndo_open = bnx2x_open,
7059 .ndo_stop = bnx2x_close,
7060 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 7061 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
7062 .ndo_set_mac_address = bnx2x_change_mac_addr,
7063 .ndo_validate_addr = eth_validate_addr,
7064 .ndo_do_ioctl = bnx2x_ioctl,
7065 .ndo_change_mtu = bnx2x_change_mtu,
7066 .ndo_tx_timeout = bnx2x_tx_timeout,
7067#ifdef BCM_VLAN
7068 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
7069#endif
257ddbda 7070#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
7071 .ndo_poll_controller = poll_bnx2x,
7072#endif
7073};
7074
34f80b04
EG
7075static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7076 struct net_device *dev)
a2fbb9ea
ET
7077{
7078 struct bnx2x *bp;
7079 int rc;
7080
7081 SET_NETDEV_DEV(dev, &pdev->dev);
7082 bp = netdev_priv(dev);
7083
34f80b04
EG
7084 bp->dev = dev;
7085 bp->pdev = pdev;
a2fbb9ea 7086 bp->flags = 0;
34f80b04 7087 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
7088
7089 rc = pci_enable_device(pdev);
7090 if (rc) {
cdaa7cb8
VZ
7091 dev_err(&bp->pdev->dev,
7092 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
7093 goto err_out;
7094 }
7095
7096 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
7097 dev_err(&bp->pdev->dev,
7098 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
7099 rc = -ENODEV;
7100 goto err_out_disable;
7101 }
7102
7103 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
7104 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7105 " base address, aborting\n");
a2fbb9ea
ET
7106 rc = -ENODEV;
7107 goto err_out_disable;
7108 }
7109
34f80b04
EG
7110 if (atomic_read(&pdev->enable_cnt) == 1) {
7111 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7112 if (rc) {
cdaa7cb8
VZ
7113 dev_err(&bp->pdev->dev,
7114 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
7115 goto err_out_disable;
7116 }
a2fbb9ea 7117
34f80b04
EG
7118 pci_set_master(pdev);
7119 pci_save_state(pdev);
7120 }
a2fbb9ea
ET
7121
7122 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7123 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
7124 dev_err(&bp->pdev->dev,
7125 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
7126 rc = -EIO;
7127 goto err_out_release;
7128 }
7129
7130 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7131 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
7132 dev_err(&bp->pdev->dev,
7133 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
7134 rc = -EIO;
7135 goto err_out_release;
7136 }
7137
1a983142 7138 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 7139 bp->flags |= USING_DAC_FLAG;
1a983142 7140 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
7141 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7142 " failed, aborting\n");
a2fbb9ea
ET
7143 rc = -EIO;
7144 goto err_out_release;
7145 }
7146
1a983142 7147 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
7148 dev_err(&bp->pdev->dev,
7149 "System does not support DMA, aborting\n");
a2fbb9ea
ET
7150 rc = -EIO;
7151 goto err_out_release;
7152 }
7153
34f80b04
EG
7154 dev->mem_start = pci_resource_start(pdev, 0);
7155 dev->base_addr = dev->mem_start;
7156 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
7157
7158 dev->irq = pdev->irq;
7159
275f165f 7160 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 7161 if (!bp->regview) {
cdaa7cb8
VZ
7162 dev_err(&bp->pdev->dev,
7163 "Cannot map register space, aborting\n");
a2fbb9ea
ET
7164 rc = -ENOMEM;
7165 goto err_out_release;
7166 }
7167
34f80b04
EG
7168 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7169 min_t(u64, BNX2X_DB_SIZE,
7170 pci_resource_len(pdev, 2)));
a2fbb9ea 7171 if (!bp->doorbells) {
cdaa7cb8
VZ
7172 dev_err(&bp->pdev->dev,
7173 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
7174 rc = -ENOMEM;
7175 goto err_out_unmap;
7176 }
7177
7178 bnx2x_set_power_state(bp, PCI_D0);
7179
34f80b04
EG
7180 /* clean indirect addresses */
7181 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7182 PCICFG_VENDOR_ID_OFFSET);
7183 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7184 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7185 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7186 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 7187
72fd0718
VZ
7188 /* Reset the load counter */
7189 bnx2x_clear_load_cnt(bp);
7190
34f80b04 7191 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 7192
c64213cd 7193 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 7194 bnx2x_set_ethtool_ops(dev);
34f80b04
EG
7195 dev->features |= NETIF_F_SG;
7196 dev->features |= NETIF_F_HW_CSUM;
7197 if (bp->flags & USING_DAC_FLAG)
7198 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
7199 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7200 dev->features |= NETIF_F_TSO6;
34f80b04
EG
7201#ifdef BCM_VLAN
7202 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 7203 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
7204
7205 dev->vlan_features |= NETIF_F_SG;
7206 dev->vlan_features |= NETIF_F_HW_CSUM;
7207 if (bp->flags & USING_DAC_FLAG)
7208 dev->vlan_features |= NETIF_F_HIGHDMA;
7209 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7210 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 7211#endif
a2fbb9ea 7212
01cd4528
EG
7213 /* get_port_hwinfo() will set prtad and mmds properly */
7214 bp->mdio.prtad = MDIO_PRTAD_NONE;
7215 bp->mdio.mmds = 0;
7216 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7217 bp->mdio.dev = dev;
7218 bp->mdio.mdio_read = bnx2x_mdio_read;
7219 bp->mdio.mdio_write = bnx2x_mdio_write;
7220
a2fbb9ea
ET
7221 return 0;
7222
7223err_out_unmap:
7224 if (bp->regview) {
7225 iounmap(bp->regview);
7226 bp->regview = NULL;
7227 }
a2fbb9ea
ET
7228 if (bp->doorbells) {
7229 iounmap(bp->doorbells);
7230 bp->doorbells = NULL;
7231 }
7232
7233err_out_release:
34f80b04
EG
7234 if (atomic_read(&pdev->enable_cnt) == 1)
7235 pci_release_regions(pdev);
a2fbb9ea
ET
7236
7237err_out_disable:
7238 pci_disable_device(pdev);
7239 pci_set_drvdata(pdev, NULL);
7240
7241err_out:
7242 return rc;
7243}
7244
37f9ce62
EG
7245static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7246 int *width, int *speed)
25047950
ET
7247{
7248 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7249
37f9ce62 7250 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 7251
37f9ce62
EG
7252 /* return value of 1=2.5GHz 2=5GHz */
7253 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 7254}
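/* Hedged example of the decode above: a PCICFG_LINK_CONTROL value
 * whose width field yields 8 and whose speed field yields 2 is
 * reported by the probe message as "PCI-E x8 5GHz (Gen2)". */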
37f9ce62 7255
94a78b79
VZ
7256static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
7257{
37f9ce62 7258 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
7259 struct bnx2x_fw_file_hdr *fw_hdr;
7260 struct bnx2x_fw_file_section *sections;
94a78b79 7261 u32 offset, len, num_ops;
37f9ce62 7262 u16 *ops_offsets;
94a78b79 7263 int i;
37f9ce62 7264 const u8 *fw_ver;
94a78b79
VZ
7265
7266 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7267 return -EINVAL;
7268
7269 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7270 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7271
7272 /* Make sure none of the offsets and sizes make us read beyond
7273 * the end of the firmware data */
7274 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7275 offset = be32_to_cpu(sections[i].offset);
7276 len = be32_to_cpu(sections[i].len);
7277 if (offset + len > firmware->size) {
cdaa7cb8
VZ
7278 dev_err(&bp->pdev->dev,
7279 "Section %d length is out of bounds\n", i);
94a78b79
VZ
7280 return -EINVAL;
7281 }
7282 }
7283
7284 /* Likewise for the init_ops offsets */
7285 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7286 ops_offsets = (u16 *)(firmware->data + offset);
7287 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7288
7289 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7290 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
7291 dev_err(&bp->pdev->dev,
7292 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
7293 return -EINVAL;
7294 }
7295 }
7296
7297 /* Check FW version */
7298 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7299 fw_ver = firmware->data + offset;
7300 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7301 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7302 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7303 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
7304 dev_err(&bp->pdev->dev,
7305 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
7306 fw_ver[0], fw_ver[1], fw_ver[2],
7307 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7308 BCM_5710_FW_MINOR_VERSION,
7309 BCM_5710_FW_REVISION_VERSION,
7310 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 7311 return -EINVAL;
94a78b79
VZ
7312 }
7313
7314 return 0;
7315}
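/* The checks above reject a malformed blob before anything is parsed:
 * every section's {offset, len} pair must satisfy
 * offset + len <= firmware->size, and every init_ops offset must
 * index an existing raw_op. For example, a 10-byte file containing a
 * section that claims offset 8 and len 8 fails the first test
 * (8 + 8 > 10). */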
7316
ab6ad5a4 7317static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7318{
ab6ad5a4
EG
7319 const __be32 *source = (const __be32 *)_source;
7320 u32 *target = (u32 *)_target;
94a78b79 7321 u32 i;
94a78b79
VZ
7322
7323 for (i = 0; i < n/4; i++)
7324 target[i] = be32_to_cpu(source[i]);
7325}
7326
7327/*
7328 Ops array is stored in the following format:
7329 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7330 */
ab6ad5a4 7331static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 7332{
ab6ad5a4
EG
7333 const __be32 *source = (const __be32 *)_source;
7334 struct raw_op *target = (struct raw_op *)_target;
94a78b79 7335 u32 i, j, tmp;
94a78b79 7336
ab6ad5a4 7337 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
7338 tmp = be32_to_cpu(source[j]);
7339 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
7340 target[i].offset = tmp & 0xffffff;
7341 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
7342 }
7343}
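/* Worked example of the unpacking above: for the big-endian source
 * words 0x04AABBCC 0x00000001, tmp = 0x04AABBCC, giving op = 0x04,
 * offset = 0xAABBCC and raw_data = 0x00000001. */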
ab6ad5a4
EG
7344
7345static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7346{
ab6ad5a4
EG
7347 const __be16 *source = (const __be16 *)_source;
7348 u16 *target = (u16 *)_target;
94a78b79 7349 u32 i;
94a78b79
VZ
7350
7351 for (i = 0; i < n/2; i++)
7352 target[i] = be16_to_cpu(source[i]);
7353}
7354
7995c64e
JP
7355#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7356do { \
7357 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7358 bp->arr = kmalloc(len, GFP_KERNEL); \
7359 if (!bp->arr) { \
7360 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7361 goto lbl; \
7362 } \
7363 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7364 (u8 *)bp->arr, len); \
7365} while (0)
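/* Wrapping the body in do { ... } while (0) makes
 * BNX2X_ALLOC_AND_SET() expand to a single statement, so it composes
 * safely with unbraced if/else; on allocation failure "goto lbl"
 * jumps to the caller's unwind label. */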
94a78b79 7366
94a78b79
VZ
7367static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7368{
45229b42 7369 const char *fw_file_name;
94a78b79 7370 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 7371 int rc;
94a78b79 7372
94a78b79 7373 if (CHIP_IS_E1(bp))
45229b42 7374 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 7375 else if (CHIP_IS_E1H(bp))
45229b42 7376 fw_file_name = FW_FILE_NAME_E1H;
cdaa7cb8
VZ
7377 else {
7378 dev_err(dev, "Unsupported chip revision\n");
7379 return -EINVAL;
7380 }
94a78b79 7381
cdaa7cb8 7382 dev_info(dev, "Loading %s\n", fw_file_name);
94a78b79
VZ
7383
7384 rc = request_firmware(&bp->firmware, fw_file_name, dev);
7385 if (rc) {
cdaa7cb8 7386 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
7387 goto request_firmware_exit;
7388 }
7389
7390 rc = bnx2x_check_firmware(bp);
7391 if (rc) {
cdaa7cb8 7392 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
7393 goto request_firmware_exit;
7394 }
7395
7396 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7397
7398 /* Initialize the pointers to the init arrays */
7399 /* Blob */
7400 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7401
7402 /* Opcodes */
7403 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7404
7405 /* Offsets */
ab6ad5a4
EG
7406 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7407 be16_to_cpu_n);
94a78b79
VZ
7408
7409 /* STORMs firmware */
573f2035
EG
7410 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7411 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7412 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
7413 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7414 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7415 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7416 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
7417 be32_to_cpu(fw_hdr->usem_pram_data.offset);
7418 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7419 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7420 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
7421 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7422 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7423 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7424 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7425 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
7426
7427 return 0;
ab6ad5a4 7428
94a78b79
VZ
7429init_offsets_alloc_err:
7430 kfree(bp->init_ops);
7431init_ops_alloc_err:
7432 kfree(bp->init_data);
7433request_firmware_exit:
7434 release_firmware(bp->firmware);
7435
7436 return rc;
7437}
7438
7439
a2fbb9ea
ET
7440static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7441 const struct pci_device_id *ent)
7442{
a2fbb9ea
ET
7443 struct net_device *dev = NULL;
7444 struct bnx2x *bp;
37f9ce62 7445 int pcie_width, pcie_speed;
25047950 7446 int rc;
a2fbb9ea 7447
a2fbb9ea 7448 /* dev zeroed in init_etherdev */
555f6c78 7449 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04 7450 if (!dev) {
cdaa7cb8 7451 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 7452 return -ENOMEM;
34f80b04 7453 }
a2fbb9ea 7454
a2fbb9ea 7455 bp = netdev_priv(dev);
7995c64e 7456 bp->msg_enable = debug;
a2fbb9ea 7457
df4770de
EG
7458 pci_set_drvdata(pdev, dev);
7459
34f80b04 7460 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
7461 if (rc < 0) {
7462 free_netdev(dev);
7463 return rc;
7464 }
7465
34f80b04 7466 rc = bnx2x_init_bp(bp);
693fc0d1
EG
7467 if (rc)
7468 goto init_one_exit;
7469
94a78b79
VZ
7470 /* Set init arrays */
7471 rc = bnx2x_init_firmware(bp, &pdev->dev);
7472 if (rc) {
cdaa7cb8 7473 dev_err(&pdev->dev, "Error loading firmware\n");
94a78b79
VZ
7474 goto init_one_exit;
7475 }
7476
693fc0d1 7477 rc = register_netdev(dev);
34f80b04 7478 if (rc) {
693fc0d1 7479 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
7480 goto init_one_exit;
7481 }
7482
37f9ce62 7483 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
cdaa7cb8
VZ
7484 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7485 " IRQ %d, ", board_info[ent->driver_data].name,
7486 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7487 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7488 dev->base_addr, bp->pdev->irq);
7489 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 7490
a2fbb9ea 7491 return 0;
34f80b04
EG
7492
7493init_one_exit:
7494 if (bp->regview)
7495 iounmap(bp->regview);
7496
7497 if (bp->doorbells)
7498 iounmap(bp->doorbells);
7499
7500 free_netdev(dev);
7501
7502 if (atomic_read(&pdev->enable_cnt) == 1)
7503 pci_release_regions(pdev);
7504
7505 pci_disable_device(pdev);
7506 pci_set_drvdata(pdev, NULL);
7507
7508 return rc;
a2fbb9ea
ET
7509}
7510
7511static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7512{
7513 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
7514 struct bnx2x *bp;
7515
7516 if (!dev) {
cdaa7cb8 7517 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
7518 return;
7519 }
228241eb 7520 bp = netdev_priv(dev);
a2fbb9ea 7521
a2fbb9ea
ET
7522 unregister_netdev(dev);
7523
72fd0718
VZ
7524 /* Make sure RESET task is not scheduled before continuing */
7525 cancel_delayed_work_sync(&bp->reset_task);
7526
94a78b79
VZ
7527 kfree(bp->init_ops_offsets);
7528 kfree(bp->init_ops);
7529 kfree(bp->init_data);
7530 release_firmware(bp->firmware);
7531
a2fbb9ea
ET
7532 if (bp->regview)
7533 iounmap(bp->regview);
7534
7535 if (bp->doorbells)
7536 iounmap(bp->doorbells);
7537
7538 free_netdev(dev);
34f80b04
EG
7539
7540 if (atomic_read(&pdev->enable_cnt) == 1)
7541 pci_release_regions(pdev);
7542
a2fbb9ea
ET
7543 pci_disable_device(pdev);
7544 pci_set_drvdata(pdev, NULL);
7545}
7546
f8ef6e44
YG
7547static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7548{
7549 int i;
7550
7551 bp->state = BNX2X_STATE_ERROR;
7552
7553 bp->rx_mode = BNX2X_RX_MODE_NONE;
7554
7555 bnx2x_netif_stop(bp, 0);
c89af1a3 7556 netif_carrier_off(bp->dev);
f8ef6e44
YG
7557
7558 del_timer_sync(&bp->timer);
7559 bp->stats_state = STATS_STATE_DISABLED;
7560 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7561
7562 /* Release IRQs */
6cbe5065 7563 bnx2x_free_irq(bp, false);
f8ef6e44
YG
7564
7565 if (CHIP_IS_E1(bp)) {
7566 struct mac_configuration_cmd *config =
7567 bnx2x_sp(bp, mcast_config);
7568
8d9c5f34 7569 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
7570 CAM_INVALIDATE(config->config_table[i]);
7571 }
7572
7573 /* Free SKBs, SGEs, TPA pool and driver internals */
7574 bnx2x_free_skbs(bp);
54b9ddaa 7575 for_each_queue(bp, i)
f8ef6e44 7576 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 7577 for_each_queue(bp, i)
7cde1c8b 7578 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
7579 bnx2x_free_mem(bp);
7580
7581 bp->state = BNX2X_STATE_CLOSED;
7582
f8ef6e44
YG
7583 return 0;
7584}
7585
7586static void bnx2x_eeh_recover(struct bnx2x *bp)
7587{
7588 u32 val;
7589
7590 mutex_init(&bp->port.phy_mutex);
7591
7592 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7593 bp->link_params.shmem_base = bp->common.shmem_base;
7594 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7595
7596 if (!bp->common.shmem_base ||
7597 (bp->common.shmem_base < 0xA0000) ||
7598 (bp->common.shmem_base >= 0xC0000)) {
7599 BNX2X_DEV_INFO("MCP not active\n");
7600 bp->flags |= NO_MCP_FLAG;
7601 return;
7602 }
7603
7604 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7605 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7606 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7607 BNX2X_ERR("BAD MCP validity signature\n");
7608
7609 if (!BP_NOMCP(bp)) {
7610 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7611 & DRV_MSG_SEQ_NUMBER_MASK);
7612 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7613 }
7614}
7615
493adb1f
WX
7616/**
7617 * bnx2x_io_error_detected - called when PCI error is detected
7618 * @pdev: Pointer to PCI device
7619 * @state: The current pci connection state
7620 *
7621 * This function is called after a PCI bus error affecting
7622 * this device has been detected.
7623 */
7624static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7625 pci_channel_state_t state)
7626{
7627 struct net_device *dev = pci_get_drvdata(pdev);
7628 struct bnx2x *bp = netdev_priv(dev);
7629
7630 rtnl_lock();
7631
7632 netif_device_detach(dev);
7633
07ce50e4
DN
7634 if (state == pci_channel_io_perm_failure) {
7635 rtnl_unlock();
7636 return PCI_ERS_RESULT_DISCONNECT;
7637 }
7638
493adb1f 7639 if (netif_running(dev))
f8ef6e44 7640 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
7641
7642 pci_disable_device(pdev);
7643
7644 rtnl_unlock();
7645
7646 /* Request a slot reset */
7647 return PCI_ERS_RESULT_NEED_RESET;
7648}
7649
7650/**
7651 * bnx2x_io_slot_reset - called after the PCI bus has been reset
7652 * @pdev: Pointer to PCI device
7653 *
7654 * Restart the card from scratch, as if from a cold-boot.
7655 */
7656static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7657{
7658 struct net_device *dev = pci_get_drvdata(pdev);
7659 struct bnx2x *bp = netdev_priv(dev);
7660
7661 rtnl_lock();
7662
7663 if (pci_enable_device(pdev)) {
7664 dev_err(&pdev->dev,
7665 "Cannot re-enable PCI device after reset\n");
7666 rtnl_unlock();
7667 return PCI_ERS_RESULT_DISCONNECT;
7668 }
7669
7670 pci_set_master(pdev);
7671 pci_restore_state(pdev);
7672
7673 if (netif_running(dev))
7674 bnx2x_set_power_state(bp, PCI_D0);
7675
7676 rtnl_unlock();
7677
7678 return PCI_ERS_RESULT_RECOVERED;
7679}
7680
7681/**
7682 * bnx2x_io_resume - called when traffic can start flowing again
7683 * @pdev: Pointer to PCI device
7684 *
7685 * This callback is called when the error recovery driver tells us that
7686 * its OK to resume normal operation.
7687 */
7688static void bnx2x_io_resume(struct pci_dev *pdev)
7689{
7690 struct net_device *dev = pci_get_drvdata(pdev);
7691 struct bnx2x *bp = netdev_priv(dev);
7692
72fd0718
VZ
7693 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7694 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7695 return;
7696 }
7697
493adb1f
WX
7698 rtnl_lock();
7699
f8ef6e44
YG
7700 bnx2x_eeh_recover(bp);
7701
493adb1f 7702 if (netif_running(dev))
f8ef6e44 7703 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
7704
7705 netif_device_attach(dev);
7706
7707 rtnl_unlock();
7708}
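/* Together, the three callbacks above implement the PCI error
 * recovery state machine: error_detected() detaches the netdev and
 * requests a slot reset, slot_reset() re-enables the device as if
 * from a cold boot, and resume() reloads the NIC and re-attaches the
 * netdev once the core allows traffic again. */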
7709
7710static struct pci_error_handlers bnx2x_err_handler = {
7711 .error_detected = bnx2x_io_error_detected,
356e2385
EG
7712 .slot_reset = bnx2x_io_slot_reset,
7713 .resume = bnx2x_io_resume,
493adb1f
WX
7714};
7715
a2fbb9ea 7716static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
7717 .name = DRV_MODULE_NAME,
7718 .id_table = bnx2x_pci_tbl,
7719 .probe = bnx2x_init_one,
7720 .remove = __devexit_p(bnx2x_remove_one),
7721 .suspend = bnx2x_suspend,
7722 .resume = bnx2x_resume,
7723 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
7724};
7725
7726static int __init bnx2x_init(void)
7727{
dd21ca6d
SG
7728 int ret;
7729
7995c64e 7730 pr_info("%s", version);
938cf541 7731
1cf167f2
EG
7732 bnx2x_wq = create_singlethread_workqueue("bnx2x");
7733 if (bnx2x_wq == NULL) {
7995c64e 7734 pr_err("Cannot create workqueue\n");
1cf167f2
EG
7735 return -ENOMEM;
7736 }
7737
dd21ca6d
SG
7738 ret = pci_register_driver(&bnx2x_pci_driver);
7739 if (ret) {
7995c64e 7740 pr_err("Cannot register driver\n");
dd21ca6d
SG
7741 destroy_workqueue(bnx2x_wq);
7742 }
7743 return ret;
a2fbb9ea
ET
7744}
7745
7746static void __exit bnx2x_cleanup(void)
7747{
7748 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
7749
7750 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
7751}
7752
7753module_init(bnx2x_init);
7754module_exit(bnx2x_cleanup);
7755
993ac7b5
MC
7756#ifdef BCM_CNIC
7757
7758/* count denotes the number of new completions we have seen */
7759static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7760{
7761 struct eth_spe *spe;
7762
7763#ifdef BNX2X_STOP_ON_ERROR
7764 if (unlikely(bp->panic))
7765 return;
7766#endif
7767
7768 spin_lock_bh(&bp->spq_lock);
7769 bp->cnic_spq_pending -= count;
7770
7771 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7772 bp->cnic_spq_pending++) {
7773
7774 if (!bp->cnic_kwq_pending)
7775 break;
7776
7777 spe = bnx2x_sp_get_next(bp);
7778 *spe = *bp->cnic_kwq_cons;
7779
7780 bp->cnic_kwq_pending--;
7781
7782 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7783 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7784
7785 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7786 bp->cnic_kwq_cons = bp->cnic_kwq;
7787 else
7788 bp->cnic_kwq_cons++;
7789 }
7790 bnx2x_sp_prod_update(bp);
7791 spin_unlock_bh(&bp->spq_lock);
7792}
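/* cnic_spq_pending counts CNIC slow-path entries in flight; each
 * completion reported via "count" returns credit, and the loop then
 * drains queued KWQEs from the driver-side ring into the SPQ until
 * either the max_kwqe_pending credit or the backlog is exhausted.
 * The KWQ is a plain ring: the consumer wraps from cnic_kwq_last
 * back to cnic_kwq. */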
7793
7794static int bnx2x_cnic_sp_queue(struct net_device *dev,
7795 struct kwqe_16 *kwqes[], u32 count)
7796{
7797 struct bnx2x *bp = netdev_priv(dev);
7798 int i;
7799
7800#ifdef BNX2X_STOP_ON_ERROR
7801 if (unlikely(bp->panic))
7802 return -EIO;
7803#endif
7804
7805 spin_lock_bh(&bp->spq_lock);
7806
7807 for (i = 0; i < count; i++) {
7808 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7809
7810 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7811 break;
7812
7813 *bp->cnic_kwq_prod = *spe;
7814
7815 bp->cnic_kwq_pending++;
7816
7817 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7818 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7819 spe->data.mac_config_addr.hi,
7820 spe->data.mac_config_addr.lo,
7821 bp->cnic_kwq_pending);
7822
7823 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7824 bp->cnic_kwq_prod = bp->cnic_kwq;
7825 else
7826 bp->cnic_kwq_prod++;
7827 }
7828
7829 spin_unlock_bh(&bp->spq_lock);
7830
7831 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7832 bnx2x_cnic_sp_post(bp, 0);
7833
7834 return i;
7835}
7836
7837static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7838{
7839 struct cnic_ops *c_ops;
7840 int rc = 0;
7841
7842 mutex_lock(&bp->cnic_mutex);
7843 c_ops = bp->cnic_ops;
7844 if (c_ops)
7845 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7846 mutex_unlock(&bp->cnic_mutex);
7847
7848 return rc;
7849}
7850
7851static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7852{
7853 struct cnic_ops *c_ops;
7854 int rc = 0;
7855
7856 rcu_read_lock();
7857 c_ops = rcu_dereference(bp->cnic_ops);
7858 if (c_ops)
7859 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7860 rcu_read_unlock();
7861
7862 return rc;
7863}
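/* The _bh variant runs from softirq context, so instead of taking
 * cnic_mutex it samples cnic_ops under rcu_read_lock();
 * bnx2x_unregister_cnic() clears the pointer and calls
 * synchronize_rcu() before freeing, which keeps this lockless read
 * safe. */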
7864
7865/*
7866 * for commands that have no data
7867 */
9f6c9258 7868int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
7869{
7870 struct cnic_ctl_info ctl = {0};
7871
7872 ctl.cmd = cmd;
7873
7874 return bnx2x_cnic_ctl_send(bp, &ctl);
7875}
7876
7877static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7878{
7879 struct cnic_ctl_info ctl;
7880
7881 /* first we tell CNIC and only then we count this as a completion */
7882 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7883 ctl.data.comp.cid = cid;
7884
7885 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7886 bnx2x_cnic_sp_post(bp, 1);
7887}
7888
7889static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7890{
7891 struct bnx2x *bp = netdev_priv(dev);
7892 int rc = 0;
7893
7894 switch (ctl->cmd) {
7895 case DRV_CTL_CTXTBL_WR_CMD: {
7896 u32 index = ctl->data.io.offset;
7897 dma_addr_t addr = ctl->data.io.dma_addr;
7898
7899 bnx2x_ilt_wr(bp, index, addr);
7900 break;
7901 }
7902
7903 case DRV_CTL_COMPLETION_CMD: {
7904 int count = ctl->data.comp.comp_count;
7905
7906 bnx2x_cnic_sp_post(bp, count);
7907 break;
7908 }
7909
7910 /* rtnl_lock is held. */
7911 case DRV_CTL_START_L2_CMD: {
7912 u32 cli = ctl->data.ring.client_id;
7913
7914 bp->rx_mode_cl_mask |= (1 << cli);
7915 bnx2x_set_storm_rx_mode(bp);
7916 break;
7917 }
7918
7919 /* rtnl_lock is held. */
7920 case DRV_CTL_STOP_L2_CMD: {
7921 u32 cli = ctl->data.ring.client_id;
7922
7923 bp->rx_mode_cl_mask &= ~(1 << cli);
7924 bnx2x_set_storm_rx_mode(bp);
7925 break;
7926 }
7927
7928 default:
7929 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7930 rc = -EINVAL;
7931 }
7932
7933 return rc;
7934}
7935
9f6c9258 7936void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
7937{
7938 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7939
7940 if (bp->flags & USING_MSIX_FLAG) {
7941 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7942 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7943 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7944 } else {
7945 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7946 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7947 }
7948 cp->irq_arr[0].status_blk = bp->cnic_sb;
7949 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7950 cp->irq_arr[1].status_blk = bp->def_status_blk;
7951 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7952
7953 cp->num_irq = 2;
7954}
7955
7956static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7957 void *data)
7958{
7959 struct bnx2x *bp = netdev_priv(dev);
7960 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7961
7962 if (ops == NULL)
7963 return -EINVAL;
7964
7965 if (atomic_read(&bp->intr_sem) != 0)
7966 return -EBUSY;
7967
7968 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7969 if (!bp->cnic_kwq)
7970 return -ENOMEM;
7971
7972 bp->cnic_kwq_cons = bp->cnic_kwq;
7973 bp->cnic_kwq_prod = bp->cnic_kwq;
7974 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7975
7976 bp->cnic_spq_pending = 0;
7977 bp->cnic_kwq_pending = 0;
7978
7979 bp->cnic_data = data;
7980
7981 cp->num_irq = 0;
7982 cp->drv_state = CNIC_DRV_STATE_REGD;
7983
7984 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7985
7986 bnx2x_setup_cnic_irq_info(bp);
7987 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7988 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7989 rcu_assign_pointer(bp->cnic_ops, ops);
7990
7991 return 0;
7992}
7993
7994static int bnx2x_unregister_cnic(struct net_device *dev)
7995{
7996 struct bnx2x *bp = netdev_priv(dev);
7997 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7998
7999 mutex_lock(&bp->cnic_mutex);
8000 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8001 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8002 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8003 }
8004 cp->drv_state = 0;
8005 rcu_assign_pointer(bp->cnic_ops, NULL);
8006 mutex_unlock(&bp->cnic_mutex);
8007 synchronize_rcu();
8008 kfree(bp->cnic_kwq);
8009 bp->cnic_kwq = NULL;
8010
8011 return 0;
8012}
8013
8014struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8015{
8016 struct bnx2x *bp = netdev_priv(dev);
8017 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8018
8019 cp->drv_owner = THIS_MODULE;
8020 cp->chip_id = CHIP_ID(bp);
8021 cp->pdev = bp->pdev;
8022 cp->io_base = bp->regview;
8023 cp->io_base2 = bp->doorbells;
8024 cp->max_kwqe_pending = 8;
8025 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
8026 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8027 cp->ctx_tbl_len = CNIC_ILT_LINES;
8028 cp->starting_cid = BCM_CNIC_CID_START;
8029 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8030 cp->drv_ctl = bnx2x_drv_ctl;
8031 cp->drv_register_cnic = bnx2x_register_cnic;
8032 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8033
8034 return cp;
8035}
8036EXPORT_SYMBOL(bnx2x_cnic_probe);
8037
8038#endif /* BCM_CNIC */
94a78b79 8039