/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

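/*
 * The two helpers below reach the device's internal (GRC) register space
 * through a window in PCI config space: the target address is written to
 * PCICFG_GRC_ADDRESS, the data is then accessed at PCICFG_GRC_DATA, and
 * the window is parked back at PCICFG_VENDOR_ID_OFFSET so a later config
 * access cannot hit a stray register.
 */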
/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

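/* dmae_reg_go_c[idx] above is the "go" doorbell of DMAE channel idx: a
 * command is executed by filling the channel's command memory dword by
 * dword and then writing 1 to its doorbell, as done below.
 */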
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

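/*
 * Copy a host buffer into GRC space over DMAE.  If the DMAE block is not
 * ready yet, fall back to slow indirect writes; otherwise build the
 * command, post it under dmae_mutex and poll the write-back completion
 * word until the engine reports DMAE_COMP_VAL (or the retry count runs
 * out).
 */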
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

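/*
 * A single DMAE command can move at most DMAE_LEN32_WR_MAX dwords, so a
 * longer buffer is written as maximal-size chunks followed by the
 * remainder.  'len' counts dwords while 'offset' advances in bytes, hence
 * the "* 4" below.
 */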
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

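/*
 * The MCP keeps a cyclic trace buffer in its scratchpad area just below
 * the shmem base.  The dump reads the current "mark" and then prints the
 * buffer in two passes - from the mark to the end and from the start up
 * to the mark - which, the buffer being cyclic, should yield the trace in
 * chronological order.
 */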
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

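/*
 * Dump the driver's view of the device - per-queue indices, the Rx/Tx
 * rings around the current consumers, the FW trace and the STORM asserts -
 * for post-mortem analysis.  Statistics are disabled first so that the
 * (presumably dying) device is not asked for further statistics updates
 * while the dump runs.
 */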
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

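/*
 * Quiesce interrupt processing: raise intr_sem so the handlers become
 * no-ops, optionally mask interrupts at the HC, synchronize every vector
 * so no handler is still running, and cancel/flush the slowpath task so
 * it cannot be re-armed behind our back.
 */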
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

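/*
 * HW resource locks are implemented in the MISC driver-control registers,
 * one register pair per PCI function.  Writing the resource bit to
 * (reg + 4) requests the lock; reading the register back shows whether
 * the bit - and thus the lock - was actually granted to us.
 */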
/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

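/*
 * Interrupt handler for the INTx/MSI (single vector) case.  The ack
 * returns a status bitmask: bit 0 signals a slowpath event, and bit
 * (sb_id + 1) signals work on the corresponding fastpath status block,
 * hence the "0x2 << sb_id" tests below.
 */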
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

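/*
 * Blocking counterpart of bnx2x_trylock_hw_lock() above: retries the
 * lock request every 5ms for up to 5 seconds before giving up with
 * -EAGAIN.
 */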
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

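/*
 * Translate the pause mode negotiated by the link code
 * (bp->link_vars.ieee_fc) into the ADVERTISED_Pause/ADVERTISED_Asym_Pause
 * flags kept in bp->port.advertising, which is what ethtool reports to
 * userspace.
 */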
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

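/*
 * Congestion-management (rate shaping and fairness) setup for E1H
 * multi-function mode.  r_param below is the line rate in bytes per usec
 * (assuming link_vars.line_speed is in Mbps, line_speed/8 gives exactly
 * that), and the timeouts are converted to 4-usec SDM ticks.
 */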
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

2691d51d
EG
1627static void bnx2x_e1h_disable(struct bnx2x *bp)
1628{
1629 int port = BP_PORT(bp);
2691d51d
EG
1630
1631 netif_tx_disable(bp->dev);
2691d51d
EG
1632
1633 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1634
2691d51d
EG
1635 netif_carrier_off(bp->dev);
1636}
1637
1638static void bnx2x_e1h_enable(struct bnx2x *bp)
1639{
1640 int port = BP_PORT(bp);
1641
1642 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1643
2691d51d
EG
1644 /* Tx queue should be only reenabled */
1645 netif_tx_wake_all_queues(bp->dev);
1646
061bc702
EG
1647 /*
1648 * Should not call netif_carrier_on since it will be called if the link
1649 * is up when checking for link state
1650 */
2691d51d
EG
1651}
1652
1653static void bnx2x_update_min_max(struct bnx2x *bp)
1654{
1655 int port = BP_PORT(bp);
1656 int vn, i;
1657
1658 /* Init rate shaping and fairness contexts */
1659 bnx2x_init_port_minmax(bp);
1660
1661 bnx2x_calc_vn_weight_sum(bp);
1662
1663 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1664 bnx2x_init_vn_minmax(bp, 2*vn + port);
1665
1666 if (bp->port.pmf) {
1667 int func;
1668
1669 /* Set the attention towards other drivers on the same port */
1670 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1671 if (vn == BP_E1HVN(bp))
1672 continue;
1673
1674 func = ((vn << 1) | port);
1675 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1676 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1677 }
1678
1679 /* Store it to internal memory */
1680 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1681 REG_WR(bp, BAR_XSTRORM_INTMEM +
1682 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1683 ((u32 *)(&bp->cmng))[i]);
1684 }
1685}
1686
1687static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1688{
2691d51d 1689 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
1690
1691 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1692
1693 /*
1694 * This is the only place besides the function initialization
1695 * where bp->flags can change, so it is done without any
1696 * locks
1697 */
1698 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1699 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1700 bp->flags |= MF_FUNC_DIS;
1701
1702 bnx2x_e1h_disable(bp);
1703 } else {
1704 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1705 bp->flags &= ~MF_FUNC_DIS;
1706
1707 bnx2x_e1h_enable(bp);
1708 }
1709 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1710 }
1711 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1712
1713 bnx2x_update_min_max(bp);
1714 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1715 }
1716
1717 /* Report results to MCP */
1718 if (dcc_event)
1719 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1720 else
1721 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1722}
1723
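/*
 * Illustrative note: bnx2x_dcc_event() clears each DCC bit it has handled
 * from dcc_event, so any bit still set at the end marks an unhandled event
 * and DCC_FAILURE is reported back to the MCP instead of DCC_OK.
 */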
1724/* must be called under the spq lock */
1725static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1726{
1727 struct eth_spe *next_spe = bp->spq_prod_bd;
1728
1729 if (bp->spq_prod_bd == bp->spq_last_bd) {
1730 bp->spq_prod_bd = bp->spq;
1731 bp->spq_prod_idx = 0;
1732 DP(NETIF_MSG_TIMER, "end of spq\n");
1733 } else {
1734 bp->spq_prod_bd++;
1735 bp->spq_prod_idx++;
1736 }
1737 return next_spe;
1738}
1739
1740/* must be called under the spq lock */
1741static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1742{
1743 int func = BP_FUNC(bp);
1744
1745 /* Make sure that BD data is updated before writing the producer */
1746 wmb();
1747
1748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1749 bp->spq_prod_idx);
1750 mmiowb();
1751}
1752
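/*
 * Illustrative note (inferred from the two helpers above): the SPQ is a
 * circular ring - bnx2x_sp_get_next() hands out the current producer BD and
 * advances, wrapping from spq_last_bd back to the start, so spq_prod_idx
 * follows 0, 1, ..., 0, ... The wmb() in bnx2x_sp_prod_update() orders the
 * BD write before the producer update the chip polls on.
 */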
1753/* the slow path queue is odd since completions arrive on the fastpath ring */
1754int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1755 u32 data_hi, u32 data_lo, int common)
1756{
1757 struct eth_spe *spe;
1758
1759#ifdef BNX2X_STOP_ON_ERROR
1760 if (unlikely(bp->panic))
1761 return -EIO;
1762#endif
1763
1764 spin_lock_bh(&bp->spq_lock);
1765
1766 if (!bp->spq_left) {
1767 BNX2X_ERR("BUG! SPQ ring full!\n");
1768 spin_unlock_bh(&bp->spq_lock);
1769 bnx2x_panic();
1770 return -EBUSY;
1771 }
1772
1773 spe = bnx2x_sp_get_next(bp);
1774
1775 /* CID needs port number to be encoded in it */
1776 spe->hdr.conn_and_cmd_data =
1777 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1778 HW_CID(bp, cid));
1779 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1780 if (common)
1781 spe->hdr.type |=
1782 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1783
1784 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1785 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1786
1787 bp->spq_left--;
1788
1789 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1790 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1791 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1792 (u32)(U64_LO(bp->spq_mapping) +
1793 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1794 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1795
1796 bnx2x_sp_prod_update(bp);
1797 spin_unlock_bh(&bp->spq_lock);
1798 return 0;
1799}
1800
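/*
 * Illustrative sketch (hypothetical call, not from this file): a ramrod is
 * posted by slow path clients roughly as
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, cid, 0, 0, 0);
 *
 * spq_left is the only flow control here; it is decremented per post and is
 * expected to be replenished when the completion shows up on the fastpath
 * ring, as the comment above the function notes.
 */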
1801/* acquire split MCP access lock register */
1802static int bnx2x_acquire_alr(struct bnx2x *bp)
1803{
1804 u32 j, val;
1805 int rc = 0;
1806
1807 might_sleep();
1808 for (j = 0; j < 1000; j++) {
1809 val = (1UL << 31);
1810 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1811 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1812 if (val & (1L << 31))
1813 break;
1814
1815 msleep(5);
1816 }
1817 if (!(val & (1L << 31))) {
1818 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1819 rc = -EBUSY;
1820 }
1821
1822 return rc;
1823}
1824
1825/* release split MCP access lock register */
1826static void bnx2x_release_alr(struct bnx2x *bp)
1827{
1828 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1829}
1830
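/*
 * Illustrative note: the ALR is a hardware test-and-set lock - writing bit
 * 31 to GRCBASE_MCP + 0x9c and reading it back as set means the lock was
 * taken, writing 0 releases it. The acquire loop above retries for up to
 * ~5 seconds (1000 * 5 ms) before failing with -EBUSY.
 */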
1831static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1832{
1833 struct host_def_status_block *def_sb = bp->def_status_blk;
1834 u16 rc = 0;
1835
1836 barrier(); /* status block is written to by the chip */
1837 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1838 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1839 rc |= 1;
1840 }
1841 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1842 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1843 rc |= 2;
1844 }
1845 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1846 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1847 rc |= 4;
1848 }
1849 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1850 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1851 rc |= 8;
1852 }
1853 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1854 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1855 rc |= 16;
1856 }
1857 return rc;
1858}
1859
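/*
 * Illustrative note: the value returned above is a bitmask of which default
 * status block indices moved since the last check - bit 0 for attentions,
 * then bits 1..4 for the C, U, X and T storm indices in the order tested.
 * bnx2x_sp_task() below keys off the low bits of this mask.
 */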
1860/*
1861 * slow path service functions
1862 */
1863
1864static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1865{
1866 int port = BP_PORT(bp);
1867 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1868 COMMAND_REG_ATTN_BITS_SET);
1869 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1870 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1871 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1872 NIG_REG_MASK_INTERRUPT_PORT0;
1873 u32 aeu_mask;
1874 u32 nig_mask = 0;
1875
1876 if (bp->attn_state & asserted)
1877 BNX2X_ERR("IGU ERROR\n");
1878
1879 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1880 aeu_mask = REG_RD(bp, aeu_addr);
1881
1882 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
1883 aeu_mask, asserted);
1884 aeu_mask &= ~(asserted & 0x3ff);
1885 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1886
1887 REG_WR(bp, aeu_addr, aeu_mask);
1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1889
1890 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1891 bp->attn_state |= asserted;
1892 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1893
1894 if (asserted & ATTN_HARD_WIRED_MASK) {
1895 if (asserted & ATTN_NIG_FOR_FUNC) {
1896
1897 bnx2x_acquire_phy_lock(bp);
1898
1899 /* save nig interrupt mask */
1900 nig_mask = REG_RD(bp, nig_int_mask_addr);
1901 REG_WR(bp, nig_int_mask_addr, 0);
1902
1903 bnx2x_link_attn(bp);
1904
1905 /* handle unicore attn? */
1906 }
1907 if (asserted & ATTN_SW_TIMER_4_FUNC)
1908 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1909
1910 if (asserted & GPIO_2_FUNC)
1911 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1912
1913 if (asserted & GPIO_3_FUNC)
1914 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1915
1916 if (asserted & GPIO_4_FUNC)
1917 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1918
1919 if (port == 0) {
1920 if (asserted & ATTN_GENERAL_ATTN_1) {
1921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1923 }
1924 if (asserted & ATTN_GENERAL_ATTN_2) {
1925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1927 }
1928 if (asserted & ATTN_GENERAL_ATTN_3) {
1929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1931 }
1932 } else {
1933 if (asserted & ATTN_GENERAL_ATTN_4) {
1934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1936 }
1937 if (asserted & ATTN_GENERAL_ATTN_5) {
1938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1940 }
1941 if (asserted & ATTN_GENERAL_ATTN_6) {
1942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1944 }
1945 }
1946
1947 } /* if hardwired */
1948
1949 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1950 asserted, hc_addr);
1951 REG_WR(bp, hc_addr, asserted);
1952
1953 /* now set back the mask */
1954 if (asserted & ATTN_NIG_FOR_FUNC) {
1955 REG_WR(bp, nig_int_mask_addr, nig_mask);
1956 bnx2x_release_phy_lock(bp);
1957 }
1958}
1959
1960static inline void bnx2x_fan_failure(struct bnx2x *bp)
1961{
1962 int port = BP_PORT(bp);
1963
1964 /* mark the failure */
1965 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1966 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1967 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1968 bp->link_params.ext_phy_config);
1969
1970 /* log the failure */
1971 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1972 " the driver to shut down the card to prevent permanent"
1973 " damage. Please contact OEM Support for assistance\n");
1974}
1975
1976static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1977{
1978 int port = BP_PORT(bp);
1979 int reg_offset;
1980 u32 val, swap_val, swap_override;
1981
1982 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1983 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1984
1985 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1986
1987 val = REG_RD(bp, reg_offset);
1988 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1989 REG_WR(bp, reg_offset, val);
1990
1991 BNX2X_ERR("SPIO5 hw attention\n");
1992
1993 /* Fan failure attention */
1994 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1995 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1996 /* Low power mode is controlled by GPIO 2 */
1997 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1998 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1999 /* The PHY reset is controlled by GPIO 1 */
2000 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2001 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2002 break;
2003
2004 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2005 /* The PHY reset is controlled by GPIO 1 */
2006 /* fake the port number to cancel the swap done in
2007 set_gpio() */
2008 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2009 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2010 port = (swap_val && swap_override) ^ 1;
2011 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2012 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2013 break;
2014
2015 default:
2016 break;
2017 }
2018 bnx2x_fan_failure(bp);
2019 }
2020
2021 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2022 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2023 bnx2x_acquire_phy_lock(bp);
2024 bnx2x_handle_module_detect_int(&bp->link_params);
2025 bnx2x_release_phy_lock(bp);
2026 }
2027
2028 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2029
2030 val = REG_RD(bp, reg_offset);
2031 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2032 REG_WR(bp, reg_offset, val);
2033
2034 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2035 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2036 bnx2x_panic();
2037 }
2038}
2039
2040static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2041{
2042 u32 val;
2043
2044 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2045
2046 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2047 BNX2X_ERR("DB hw attention 0x%x\n", val);
2048 /* DORQ discard attention */
2049 if (val & 0x2)
2050 BNX2X_ERR("FATAL error from DORQ\n");
2051 }
2052
2053 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2054
2055 int port = BP_PORT(bp);
2056 int reg_offset;
2057
2058 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2059 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2060
2061 val = REG_RD(bp, reg_offset);
2062 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2063 REG_WR(bp, reg_offset, val);
2064
2065 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2066 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2067 bnx2x_panic();
2068 }
2069}
2070
2071static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2072{
2073 u32 val;
2074
2075 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2076
2077 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2078 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2079 /* CFC error attention */
2080 if (val & 0x2)
2081 BNX2X_ERR("FATAL error from CFC\n");
2082 }
2083
2084 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2085
2086 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2087 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2088 /* RQ_USDMDP_FIFO_OVERFLOW */
2089 if (val & 0x18000)
2090 BNX2X_ERR("FATAL error from PXP\n");
2091 }
2092
2093 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2094
2095 int port = BP_PORT(bp);
2096 int reg_offset;
2097
2098 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2099 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2100
2101 val = REG_RD(bp, reg_offset);
2102 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2103 REG_WR(bp, reg_offset, val);
2104
2105 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2106 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2107 bnx2x_panic();
2108 }
2109}
2110
2111static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2112{
2113 u32 val;
2114
2115 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2116
2117 if (attn & BNX2X_PMF_LINK_ASSERT) {
2118 int func = BP_FUNC(bp);
2119
2120 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2121 bp->mf_config = SHMEM_RD(bp,
2122 mf_cfg.func_mf_config[func].config);
2123 val = SHMEM_RD(bp, func_mb[func].drv_status);
2124 if (val & DRV_STATUS_DCC_EVENT_MASK)
2125 bnx2x_dcc_event(bp,
2126 (val & DRV_STATUS_DCC_EVENT_MASK));
2127 bnx2x__link_status_update(bp);
2128 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2129 bnx2x_pmf_update(bp);
2130
2131 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2132
2133 BNX2X_ERR("MC assert!\n");
2134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2135 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2136 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2137 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2138 bnx2x_panic();
2139
2140 } else if (attn & BNX2X_MCP_ASSERT) {
2141
2142 BNX2X_ERR("MCP assert!\n");
2143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2144 bnx2x_fw_dump(bp);
2145
2146 } else
2147 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2148 }
2149
2150 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2151 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2152 if (attn & BNX2X_GRC_TIMEOUT) {
2153 val = CHIP_IS_E1H(bp) ?
2154 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2155 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2156 }
2157 if (attn & BNX2X_GRC_RSV) {
2158 val = CHIP_IS_E1H(bp) ?
2159 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2160 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2161 }
2162 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2163 }
2164}
2165
2166#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
2167#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
2168#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2169#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2170#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2171#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
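/*
 * Illustrative note on the layout of BNX2X_MISC_GEN_REG implied by the
 * masks above: bits 0..15 hold the load counter (one count per loaded
 * function) and bit 16 (RESET_DONE_FLAG_SHIFT) is the reset-in-progress
 * flag - the literal (1 << 16) written below is this same bit, and
 * bnx2x_reset_is_done() treats the flag being clear as "done".
 */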
2172/*
2173 * should be run under rtnl lock
2174 */
2175static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2176{
2177 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2178 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2179 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2180 barrier();
2181 mmiowb();
2182}
2183
2184/*
2185 * should be run under rtnl lock
2186 */
2187static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2188{
2189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2190 val |= (1 << 16);
2191 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2192 barrier();
2193 mmiowb();
2194}
2195
2196/*
2197 * should be run under rtnl lock
2198 */
2199bool bnx2x_reset_is_done(struct bnx2x *bp)
2200{
2201 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2202 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2203 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2204}
2205
2206/*
2207 * should be run under rtnl lock
2208 */
2209inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2210{
2211 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2212
2213 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2214
2215 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2216 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2217 barrier();
2218 mmiowb();
2219}
2220
2221/*
2222 * should be run under rtnl lock
2223 */
2224u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2225{
2226 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2227
2228 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2229
2230 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2231 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2232 barrier();
2233 mmiowb();
2234
2235 return val1;
2236}
2237
2238/*
2239 * should be run under rtnl lock
2240 */
2241static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2242{
2243 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2244}
2245
2246static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2247{
2248 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2249 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2250}
2251
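/*
 * Illustrative sketch of the intended usage (inferred, not from this file):
 *
 *	bnx2x_inc_load_cnt(bp);			on nic load
 *	...
 *	if (bnx2x_dec_load_cnt(bp) == 0)	on nic unload
 *		(last user gone - recovery may reset the chip)
 *
 * all under rtnl_lock, as the comments above require.
 */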
2252static inline void _print_next_block(int idx, const char *blk)
2253{
2254 if (idx)
2255 pr_cont(", ");
2256 pr_cont("%s", blk);
2257}
2258
2259static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2260{
2261 int i = 0;
2262 u32 cur_bit = 0;
2263 for (i = 0; sig; i++) {
2264 cur_bit = ((u32)0x1 << i);
2265 if (sig & cur_bit) {
2266 switch (cur_bit) {
2267 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2268 _print_next_block(par_num++, "BRB");
2269 break;
2270 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2271 _print_next_block(par_num++, "PARSER");
2272 break;
2273 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2274 _print_next_block(par_num++, "TSDM");
2275 break;
2276 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2277 _print_next_block(par_num++, "SEARCHER");
2278 break;
2279 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2280 _print_next_block(par_num++, "TSEMI");
2281 break;
2282 }
2283
2284 /* Clear the bit */
2285 sig &= ~cur_bit;
2286 }
2287 }
2288
2289 return par_num;
2290}
2291
2292static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2293{
2294 int i = 0;
2295 u32 cur_bit = 0;
2296 for (i = 0; sig; i++) {
2297 cur_bit = ((u32)0x1 << i);
2298 if (sig & cur_bit) {
2299 switch (cur_bit) {
2300 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2301 _print_next_block(par_num++, "PBCLIENT");
2302 break;
2303 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2304 _print_next_block(par_num++, "QM");
2305 break;
2306 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2307 _print_next_block(par_num++, "XSDM");
2308 break;
2309 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2310 _print_next_block(par_num++, "XSEMI");
2311 break;
2312 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2313 _print_next_block(par_num++, "DOORBELLQ");
2314 break;
2315 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2316 _print_next_block(par_num++, "VAUX PCI CORE");
2317 break;
2318 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2319 _print_next_block(par_num++, "DEBUG");
2320 break;
2321 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2322 _print_next_block(par_num++, "USDM");
2323 break;
2324 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2325 _print_next_block(par_num++, "USEMI");
2326 break;
2327 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2328 _print_next_block(par_num++, "UPB");
2329 break;
2330 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2331 _print_next_block(par_num++, "CSDM");
2332 break;
2333 }
2334
2335 /* Clear the bit */
2336 sig &= ~cur_bit;
2337 }
2338 }
2339
2340 return par_num;
2341}
2342
2343static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2344{
2345 int i = 0;
2346 u32 cur_bit = 0;
2347 for (i = 0; sig; i++) {
2348 cur_bit = ((u32)0x1 << i);
2349 if (sig & cur_bit) {
2350 switch (cur_bit) {
2351 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2352 _print_next_block(par_num++, "CSEMI");
2353 break;
2354 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2355 _print_next_block(par_num++, "PXP");
2356 break;
2357 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2358 _print_next_block(par_num++,
2359 "PXPPCICLOCKCLIENT");
2360 break;
2361 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2362 _print_next_block(par_num++, "CFC");
2363 break;
2364 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2365 _print_next_block(par_num++, "CDU");
2366 break;
2367 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2368 _print_next_block(par_num++, "IGU");
2369 break;
2370 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2371 _print_next_block(par_num++, "MISC");
2372 break;
2373 }
2374
2375 /* Clear the bit */
2376 sig &= ~cur_bit;
2377 }
2378 }
2379
2380 return par_num;
2381}
2382
2383static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2384{
2385 int i = 0;
2386 u32 cur_bit = 0;
2387 for (i = 0; sig; i++) {
2388 cur_bit = ((u32)0x1 << i);
2389 if (sig & cur_bit) {
2390 switch (cur_bit) {
2391 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2392 _print_next_block(par_num++, "MCP ROM");
2393 break;
2394 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2395 _print_next_block(par_num++, "MCP UMP RX");
2396 break;
2397 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2398 _print_next_block(par_num++, "MCP UMP TX");
2399 break;
2400 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2401 _print_next_block(par_num++, "MCP SCPAD");
2402 break;
2403 }
2404
2405 /* Clear the bit */
2406 sig &= ~cur_bit;
2407 }
2408 }
2409
2410 return par_num;
2411}
2412
2413static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2414 u32 sig2, u32 sig3)
2415{
2416 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2417 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2418 int par_num = 0;
2419 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2420 "[0]:0x%08x [1]:0x%08x "
2421 "[2]:0x%08x [3]:0x%08x\n",
2422 sig0 & HW_PRTY_ASSERT_SET_0,
2423 sig1 & HW_PRTY_ASSERT_SET_1,
2424 sig2 & HW_PRTY_ASSERT_SET_2,
2425 sig3 & HW_PRTY_ASSERT_SET_3);
2426 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2427 bp->dev->name);
2428 par_num = bnx2x_print_blocks_with_parity0(
2429 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2430 par_num = bnx2x_print_blocks_with_parity1(
2431 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2432 par_num = bnx2x_print_blocks_with_parity2(
2433 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2434 par_num = bnx2x_print_blocks_with_parity3(
2435 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2436 printk("\n");
2437 return true;
2438 } else
2439 return false;
2440}
2441
2442bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2443{
2444 struct attn_route attn;
2445 int port = BP_PORT(bp);
2446
2447 attn.sig[0] = REG_RD(bp,
2448 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2449 port*4);
2450 attn.sig[1] = REG_RD(bp,
2451 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2452 port*4);
2453 attn.sig[2] = REG_RD(bp,
2454 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2455 port*4);
2456 attn.sig[3] = REG_RD(bp,
2457 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2458 port*4);
2459
2460 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2461 attn.sig[3]);
2462}
2463
2464static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2465{
2466 struct attn_route attn, *group_mask;
2467 int port = BP_PORT(bp);
2468 int index;
2469 u32 reg_addr;
2470 u32 val;
2471 u32 aeu_mask;
2472
2473 /* need to take HW lock because MCP or other port might also
2474 try to handle this event */
2475 bnx2x_acquire_alr(bp);
2476
2477 if (bnx2x_chk_parity_attn(bp)) {
2478 bp->recovery_state = BNX2X_RECOVERY_INIT;
2479 bnx2x_set_reset_in_progress(bp);
2480 schedule_delayed_work(&bp->reset_task, 0);
2481 /* Disable HW interrupts */
2482 bnx2x_int_disable(bp);
2483 bnx2x_release_alr(bp);
2484 /* In case of parity errors don't handle attentions so that
2485 * other functions can also "see" the parity errors.
2486 */
2487 return;
2488 }
2489
2490 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2491 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2492 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2493 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2494 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2495 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2496
2497 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2498 if (deasserted & (1 << index)) {
2499 group_mask = &bp->attn_group[index];
2500
2501 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2502 index, group_mask->sig[0], group_mask->sig[1],
2503 group_mask->sig[2], group_mask->sig[3]);
2504
2505 bnx2x_attn_int_deasserted3(bp,
2506 attn.sig[3] & group_mask->sig[3]);
2507 bnx2x_attn_int_deasserted1(bp,
2508 attn.sig[1] & group_mask->sig[1]);
2509 bnx2x_attn_int_deasserted2(bp,
2510 attn.sig[2] & group_mask->sig[2]);
2511 bnx2x_attn_int_deasserted0(bp,
2512 attn.sig[0] & group_mask->sig[0]);
2513 }
2514 }
2515
2516 bnx2x_release_alr(bp);
2517
2518 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2519
2520 val = ~deasserted;
2521 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2522 val, reg_addr);
2523 REG_WR(bp, reg_addr, val);
2524
2525 if (~bp->attn_state & deasserted)
2526 BNX2X_ERR("IGU ERROR\n");
2527
2528 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2529 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2530
2531 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2532 aeu_mask = REG_RD(bp, reg_addr);
2533
2534 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2535 aeu_mask, deasserted);
2536 aeu_mask |= (deasserted & 0x3ff);
2537 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2538
2539 REG_WR(bp, reg_addr, aeu_mask);
2540 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2541
2542 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2543 bp->attn_state &= ~deasserted;
2544 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2545}
2546
2547static void bnx2x_attn_int(struct bnx2x *bp)
2548{
2549 /* read local copy of bits */
2550 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2551 attn_bits);
2552 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2553 attn_bits_ack);
2554 u32 attn_state = bp->attn_state;
2555
2556 /* look for changed bits */
2557 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2558 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2559
2560 DP(NETIF_MSG_HW,
2561 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2562 attn_bits, attn_ack, asserted, deasserted);
2563
2564 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2565 BNX2X_ERR("BAD attention state\n");
2566
2567 /* handle bits that were raised */
2568 if (asserted)
2569 bnx2x_attn_int_asserted(bp, asserted);
2570
2571 if (deasserted)
2572 bnx2x_attn_int_deasserted(bp, deasserted);
2573}
2574
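/*
 * Illustrative note: asserted/deasserted above is a three-way compare of
 * what the chip reports (attn_bits), what was acked (attn_ack) and what the
 * driver believes (attn_state). Per bit:
 *
 *	bits=1 ack=0 state=0  ->  newly asserted
 *	bits=0 ack=1 state=1  ->  newly deasserted
 *
 * A bit on which bits and ack agree but attn_state disagrees trips the
 * "BAD attention state" error.
 */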
2575static void bnx2x_sp_task(struct work_struct *work)
2576{
2577 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2578 u16 status;
2579
2580 /* Return here if interrupt is disabled */
2581 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2582 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2583 return;
2584 }
2585
2586 status = bnx2x_update_dsb_idx(bp);
2587/* if (status == 0) */
2588/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2589
2590 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2591
2592 /* HW attentions */
2593 if (status & 0x1) {
2594 bnx2x_attn_int(bp);
2595 status &= ~0x1;
2596 }
2597
2598 /* CStorm events: STAT_QUERY */
2599 if (status & 0x2) {
2600 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2601 status &= ~0x2;
2602 }
2603
2604 if (unlikely(status))
2605 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2606 status);
2607
2608 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2609 IGU_INT_NOP, 1);
2610 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2611 IGU_INT_NOP, 1);
2612 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2613 IGU_INT_NOP, 1);
2614 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2615 IGU_INT_NOP, 1);
2616 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2617 IGU_INT_ENABLE, 1);
2618}
2619
2620irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2621{
2622 struct net_device *dev = dev_instance;
2623 struct bnx2x *bp = netdev_priv(dev);
2624
2625 /* Return here if interrupt is disabled */
2626 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2627 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2628 return IRQ_HANDLED;
2629 }
2630
2631 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2632
2633#ifdef BNX2X_STOP_ON_ERROR
2634 if (unlikely(bp->panic))
2635 return IRQ_HANDLED;
2636#endif
2637
2638#ifdef BCM_CNIC
2639 {
2640 struct cnic_ops *c_ops;
2641
2642 rcu_read_lock();
2643 c_ops = rcu_dereference(bp->cnic_ops);
2644 if (c_ops)
2645 c_ops->cnic_handler(bp->cnic_data, NULL);
2646 rcu_read_unlock();
2647 }
2648#endif
2649 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2650
2651 return IRQ_HANDLED;
2652}
2653
2654/* end of slow path */
2655
2656static void bnx2x_timer(unsigned long data)
2657{
2658 struct bnx2x *bp = (struct bnx2x *) data;
2659
2660 if (!netif_running(bp->dev))
2661 return;
2662
2663 if (atomic_read(&bp->intr_sem) != 0)
2664 goto timer_restart;
2665
2666 if (poll) {
2667 struct bnx2x_fastpath *fp = &bp->fp[0];
2668 int rc;
2669
2670 bnx2x_tx_int(fp);
2671 rc = bnx2x_rx_int(fp, 1000);
2672 }
2673
2674 if (!BP_NOMCP(bp)) {
2675 int func = BP_FUNC(bp);
2676 u32 drv_pulse;
2677 u32 mcp_pulse;
2678
2679 ++bp->fw_drv_pulse_wr_seq;
2680 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2681 /* TBD - add SYSTEM_TIME */
2682 drv_pulse = bp->fw_drv_pulse_wr_seq;
2683 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2684
2685 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2686 MCP_PULSE_SEQ_MASK);
2687 /* The delta between driver pulse and mcp response
2688 * should be 1 (before mcp response) or 0 (after mcp response)
2689 */
2690 if ((drv_pulse != mcp_pulse) &&
2691 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2692 /* someone lost a heartbeat... */
2693 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2694 drv_pulse, mcp_pulse);
2695 }
2696 }
2697
2698 if (bp->state == BNX2X_STATE_OPEN)
2699 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2700
2701timer_restart:
2702 mod_timer(&bp->timer, jiffies + bp->current_interval);
2703}
2704
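/*
 * Illustrative note: the pulse exchange in bnx2x_timer() is a driver<->MCP
 * heartbeat. The driver advances drv_pulse_mb each tick and the MCP mirrors
 * it into mcp_pulse_mb, so a healthy pair satisfies
 *
 *	drv_pulse == mcp_pulse		or
 *	drv_pulse == (mcp_pulse + 1) & MCP_PULSE_SEQ_MASK
 *
 * anything else means a missed heartbeat and is logged above.
 */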
2705/* end of Statistics */
2706
2707/* nic init */
2708
2709/*
2710 * nic init service functions
2711 */
2712
2713static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2714{
2715 int port = BP_PORT(bp);
2716
2717 /* "CSTORM" */
2718 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2719 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2720 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2721 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2722 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2723 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2724}
2725
2726void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2727 dma_addr_t mapping, int sb_id)
2728{
2729 int port = BP_PORT(bp);
2730 int func = BP_FUNC(bp);
2731 int index;
2732 u64 section;
2733
2734 /* USTORM */
2735 section = ((u64)mapping) + offsetof(struct host_status_block,
2736 u_status_block);
2737 sb->u_status_block.status_block_id = sb_id;
2738
2739 REG_WR(bp, BAR_CSTRORM_INTMEM +
2740 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2741 REG_WR(bp, BAR_CSTRORM_INTMEM +
2742 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 2743 U64_HI(section));
2744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2746
2747 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2749 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2750
2751 /* CSTORM */
2752 section = ((u64)mapping) + offsetof(struct host_status_block,
2753 c_status_block);
2754 sb->c_status_block.status_block_id = sb_id;
2755
2756 REG_WR(bp, BAR_CSTRORM_INTMEM +
2757 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2758 REG_WR(bp, BAR_CSTRORM_INTMEM +
2759 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2760 U64_HI(section));
2761 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2762 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2763
2764 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2765 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2766 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2767
2768 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2769}
2770
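/*
 * Illustrative note: for each (non-default) status block, bnx2x_init_sb()
 * programs the DMA address of its USTORM and CSTORM sections, tags them
 * with the owning function, and writes 1 to every HC_DISABLE entry so the
 * indices start disabled; bnx2x_update_coalesce() later re-enables the Rx
 * and Tx CQ indices it actually configures.
 */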
2771static void bnx2x_zero_def_sb(struct bnx2x *bp)
2772{
2773 int func = BP_FUNC(bp);
2774
2775 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2776 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2777 sizeof(struct tstorm_def_status_block)/4);
2778 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2779 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2780 sizeof(struct cstorm_def_status_block_u)/4);
2781 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2782 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2783 sizeof(struct cstorm_def_status_block_c)/4);
2784 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2785 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2786 sizeof(struct xstorm_def_status_block)/4);
2787}
2788
2789static void bnx2x_init_def_sb(struct bnx2x *bp,
2790 struct host_def_status_block *def_sb,
2791 dma_addr_t mapping, int sb_id)
2792{
2793 int port = BP_PORT(bp);
2794 int func = BP_FUNC(bp);
2795 int index, val, reg_offset;
2796 u64 section;
2797
2798 /* ATTN */
2799 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2800 atten_status_block);
2801 def_sb->atten_status_block.status_block_id = sb_id;
2802
2803 bp->attn_state = 0;
2804
2805 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2806 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2807
2808 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2809 bp->attn_group[index].sig[0] = REG_RD(bp,
2810 reg_offset + 0x10*index);
2811 bp->attn_group[index].sig[1] = REG_RD(bp,
2812 reg_offset + 0x4 + 0x10*index);
2813 bp->attn_group[index].sig[2] = REG_RD(bp,
2814 reg_offset + 0x8 + 0x10*index);
2815 bp->attn_group[index].sig[3] = REG_RD(bp,
2816 reg_offset + 0xc + 0x10*index);
2817 }
2818
2819 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2820 HC_REG_ATTN_MSG0_ADDR_L);
2821
2822 REG_WR(bp, reg_offset, U64_LO(section));
2823 REG_WR(bp, reg_offset + 4, U64_HI(section));
2824
2825 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2826
2827 val = REG_RD(bp, reg_offset);
2828 val |= sb_id;
2829 REG_WR(bp, reg_offset, val);
2830
2831 /* USTORM */
2832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2833 u_def_status_block);
2834 def_sb->u_def_status_block.status_block_id = sb_id;
2835
2836 REG_WR(bp, BAR_CSTRORM_INTMEM +
2837 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2838 REG_WR(bp, BAR_CSTRORM_INTMEM +
2839 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 2840 U64_HI(section));
2841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2843
2844 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2846 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2847
2848 /* CSTORM */
2849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2850 c_def_status_block);
2851 def_sb->c_def_status_block.status_block_id = sb_id;
2852
2853 REG_WR(bp, BAR_CSTRORM_INTMEM +
2854 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2855 REG_WR(bp, BAR_CSTRORM_INTMEM +
2856 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2857 U64_HI(section));
2858 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2859 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2860
2861 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2862 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2863 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2864
2865 /* TSTORM */
2866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2867 t_def_status_block);
2868 def_sb->t_def_status_block.status_block_id = sb_id;
2869
2870 REG_WR(bp, BAR_TSTRORM_INTMEM +
2871 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2872 REG_WR(bp, BAR_TSTRORM_INTMEM +
2873 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2874 U64_HI(section));
2875 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2876 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2877
2878 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2879 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2880 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2881
2882 /* XSTORM */
2883 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2884 x_def_status_block);
2885 def_sb->x_def_status_block.status_block_id = sb_id;
2886
2887 REG_WR(bp, BAR_XSTRORM_INTMEM +
2888 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 2889 REG_WR(bp, BAR_XSTRORM_INTMEM +
2890 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 2891 U64_HI(section));
2892 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2893 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2894
2895 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2896 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2897 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2898
2899 bp->stats_pending = 0;
2900 bp->set_mac_pending = 0;
2901
2902 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2903}
2904
2905void bnx2x_update_coalesce(struct bnx2x *bp)
2906{
2907 int port = BP_PORT(bp);
2908 int i;
2909
2910 for_each_queue(bp, i) {
2911 int sb_id = bp->fp[i].sb_id;
2912
2913 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2914 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2915 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2916 U_SB_ETH_RX_CQ_INDEX),
2917 bp->rx_ticks/(4 * BNX2X_BTR));
2918 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2919 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2920 U_SB_ETH_RX_CQ_INDEX),
2921 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2922
2923 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2924 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2925 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2926 C_SB_ETH_TX_CQ_INDEX),
2927 bp->tx_ticks/(4 * BNX2X_BTR));
2928 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2929 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2930 C_SB_ETH_TX_CQ_INDEX),
2931 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2932 }
2933}
2934
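/*
 * Illustrative note (assumption about units): rx_ticks/tx_ticks are the
 * coalescing values in usec and the division by (4 * BNX2X_BTR) converts
 * them into the timeout units the CSTORM expects. A quotient of 0 also
 * writes 1 to the HC_DISABLE entry above, i.e. a tick value below one
 * timeout unit simply turns coalescing off for that index.
 */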
2935static void bnx2x_init_sp_ring(struct bnx2x *bp)
2936{
2937 int func = BP_FUNC(bp);
2938
2939 spin_lock_init(&bp->spq_lock);
2940
2941 bp->spq_left = MAX_SPQ_PENDING;
2942 bp->spq_prod_idx = 0;
2943 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2944 bp->spq_prod_bd = bp->spq;
2945 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2946
2947 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2948 U64_LO(bp->spq_mapping));
2949 REG_WR(bp,
2950 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2951 U64_HI(bp->spq_mapping));
2952
2953 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2954 bp->spq_prod_idx);
2955}
2956
2957static void bnx2x_init_context(struct bnx2x *bp)
2958{
2959 int i;
2960
2961 /* Rx */
2962 for_each_queue(bp, i) {
2963 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2964 struct bnx2x_fastpath *fp = &bp->fp[i];
2965 u8 cl_id = fp->cl_id;
2966
2967 context->ustorm_st_context.common.sb_index_numbers =
2968 BNX2X_RX_SB_INDEX_NUM;
2969 context->ustorm_st_context.common.clientId = cl_id;
2970 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2971 context->ustorm_st_context.common.flags =
2972 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2973 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2974 context->ustorm_st_context.common.statistics_counter_id =
2975 cl_id;
2976 context->ustorm_st_context.common.mc_alignment_log_size =
2977 BNX2X_RX_ALIGN_SHIFT;
2978 context->ustorm_st_context.common.bd_buff_size =
2979 bp->rx_buf_size;
2980 context->ustorm_st_context.common.bd_page_base_hi =
2981 U64_HI(fp->rx_desc_mapping);
2982 context->ustorm_st_context.common.bd_page_base_lo =
2983 U64_LO(fp->rx_desc_mapping);
2984 if (!fp->disable_tpa) {
2985 context->ustorm_st_context.common.flags |=
2986 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2987 context->ustorm_st_context.common.sge_buff_size =
2988 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2989 0xffff);
2990 context->ustorm_st_context.common.sge_page_base_hi =
2991 U64_HI(fp->rx_sge_mapping);
2992 context->ustorm_st_context.common.sge_page_base_lo =
2993 U64_LO(fp->rx_sge_mapping);
2994
2995 context->ustorm_st_context.common.max_sges_for_packet =
2996 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2997 context->ustorm_st_context.common.max_sges_for_packet =
2998 ((context->ustorm_st_context.common.
2999 max_sges_for_packet + PAGES_PER_SGE - 1) &
3000 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3001 }
3002
3003 context->ustorm_ag_context.cdu_usage =
3004 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3005 CDU_REGION_NUMBER_UCM_AG,
3006 ETH_CONNECTION_TYPE);
3007
3008 context->xstorm_ag_context.cdu_reserved =
3009 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3010 CDU_REGION_NUMBER_XCM_AG,
3011 ETH_CONNECTION_TYPE);
3012 }
3013
3014 /* Tx */
3015 for_each_queue(bp, i) {
3016 struct bnx2x_fastpath *fp = &bp->fp[i];
3017 struct eth_context *context =
3018 bnx2x_sp(bp, context[i].eth);
3019
3020 context->cstorm_st_context.sb_index_number =
3021 C_SB_ETH_TX_CQ_INDEX;
3022 context->cstorm_st_context.status_block_id = fp->sb_id;
3023
3024 context->xstorm_st_context.tx_bd_page_base_hi =
3025 U64_HI(fp->tx_desc_mapping);
3026 context->xstorm_st_context.tx_bd_page_base_lo =
3027 U64_LO(fp->tx_desc_mapping);
3028 context->xstorm_st_context.statistics_data = (fp->cl_id |
3029 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3030 }
3031}
3032
3033static void bnx2x_init_ind_table(struct bnx2x *bp)
3034{
3035 int func = BP_FUNC(bp);
3036 int i;
3037
3038 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3039 return;
3040
3041 DP(NETIF_MSG_IFUP,
3042 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 3043 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3044 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3045 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3046 bp->fp->cl_id + (i % bp->num_queues));
3047}
3048
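/*
 * Illustrative example: with 4 queues and a leading client id of 0, the
 * loop above fills the TSTORM_INDIRECTION_TABLE_SIZE entries round-robin
 * as 0, 1, 2, 3, 0, 1, 2, 3, ... so RSS hash results spread evenly over
 * the queues.
 */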
3049void bnx2x_set_client_config(struct bnx2x *bp)
3050{
3051 struct tstorm_eth_client_config tstorm_client = {0};
3052 int port = BP_PORT(bp);
3053 int i;
3054
3055 tstorm_client.mtu = bp->dev->mtu;
3056 tstorm_client.config_flags =
3057 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3058 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3059#ifdef BCM_VLAN
3060 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3061 tstorm_client.config_flags |=
3062 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3063 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3064 }
3065#endif
3066
3067 for_each_queue(bp, i) {
3068 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3069
3070 REG_WR(bp, BAR_TSTRORM_INTMEM +
3071 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3072 ((u32 *)&tstorm_client)[0]);
3073 REG_WR(bp, BAR_TSTRORM_INTMEM +
3074 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3075 ((u32 *)&tstorm_client)[1]);
3076 }
3077
3078 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3079 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3080}
3081
3082void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3083{
3084 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3085 int mode = bp->rx_mode;
3086 int mask = bp->rx_mode_cl_mask;
3087 int func = BP_FUNC(bp);
3088 int port = BP_PORT(bp);
3089 int i;
3090 /* All but management unicast packets should pass to the host as well */
3091 u32 llh_mask =
3092 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3093 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3094 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3095 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3096
3097 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3098
3099 switch (mode) {
3100 case BNX2X_RX_MODE_NONE: /* no Rx */
3101 tstorm_mac_filter.ucast_drop_all = mask;
3102 tstorm_mac_filter.mcast_drop_all = mask;
3103 tstorm_mac_filter.bcast_drop_all = mask;
3104 break;
3105
3106 case BNX2X_RX_MODE_NORMAL:
3107 tstorm_mac_filter.bcast_accept_all = mask;
3108 break;
3109
3110 case BNX2X_RX_MODE_ALLMULTI:
3111 tstorm_mac_filter.mcast_accept_all = mask;
3112 tstorm_mac_filter.bcast_accept_all = mask;
3113 break;
3114
3115 case BNX2X_RX_MODE_PROMISC:
3116 tstorm_mac_filter.ucast_accept_all = mask;
3117 tstorm_mac_filter.mcast_accept_all = mask;
3118 tstorm_mac_filter.bcast_accept_all = mask;
3119 /* pass management unicast packets as well */
3120 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3121 break;
3122
3123 default:
3124 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3125 break;
3126 }
3127
3128 REG_WR(bp,
3129 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3130 llh_mask);
3131
3132 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3133 REG_WR(bp, BAR_TSTRORM_INTMEM +
3134 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3135 ((u32 *)&tstorm_mac_filter)[i]);
3136
3137/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3138 ((u32 *)&tstorm_mac_filter)[i]); */
3139 }
3140
3141 if (mode != BNX2X_RX_MODE_NONE)
3142 bnx2x_set_client_config(bp);
3143}
3144
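/*
 * Illustrative summary of the mode -> filter mapping configured above:
 *
 *	mode		ucast	mcast	bcast
 *	NONE		drop	drop	drop
 *	NORMAL		-	-	accept
 *	ALLMULTI	-	accept	accept
 *	PROMISC		accept	accept	accept	(+ management unicast
 *						via the NIG llh_mask)
 *
 * where "-" leaves the normal MAC-based filtering in effect.
 */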
3145static void bnx2x_init_internal_common(struct bnx2x *bp)
3146{
3147 int i;
3148
3149 /* Zero this manually as its initialization is
3150 currently missing in the initTool */
3151 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3152 REG_WR(bp, BAR_USTRORM_INTMEM +
3153 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3154}
3155
3156static void bnx2x_init_internal_port(struct bnx2x *bp)
3157{
3158 int port = BP_PORT(bp);
3159
3160 REG_WR(bp,
3161 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3162 REG_WR(bp,
3163 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3164 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3165 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3166}
3167
3168static void bnx2x_init_internal_func(struct bnx2x *bp)
3169{
3170 struct tstorm_eth_function_common_config tstorm_config = {0};
3171 struct stats_indication_flags stats_flags = {0};
3172 int port = BP_PORT(bp);
3173 int func = BP_FUNC(bp);
3174 int i, j;
3175 u32 offset;
3176 u16 max_agg_size;
3177
3178 tstorm_config.config_flags = RSS_FLAGS(bp);
3179
3180 if (is_multi(bp))
3181 tstorm_config.rss_result_mask = MULTI_MASK;
3182
3183 /* Enable TPA if needed */
3184 if (bp->flags & TPA_ENABLE_FLAG)
3185 tstorm_config.config_flags |=
3186 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3187
3188 if (IS_E1HMF(bp))
3189 tstorm_config.config_flags |=
3190 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3191
3192 tstorm_config.leading_client_id = BP_L_ID(bp);
3193
3194 REG_WR(bp, BAR_TSTRORM_INTMEM +
3195 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3196 (*(u32 *)&tstorm_config));
3197
3198 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3199 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3200 bnx2x_set_storm_rx_mode(bp);
3201
3202 for_each_queue(bp, i) {
3203 u8 cl_id = bp->fp[i].cl_id;
3204
3205 /* reset xstorm per client statistics */
3206 offset = BAR_XSTRORM_INTMEM +
3207 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3208 for (j = 0;
3209 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3210 REG_WR(bp, offset + j*4, 0);
3211
3212 /* reset tstorm per client statistics */
3213 offset = BAR_TSTRORM_INTMEM +
3214 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3215 for (j = 0;
3216 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3217 REG_WR(bp, offset + j*4, 0);
3218
3219 /* reset ustorm per client statistics */
3220 offset = BAR_USTRORM_INTMEM +
3221 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3222 for (j = 0;
3223 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3224 REG_WR(bp, offset + j*4, 0);
3225 }
3226
3227 /* Init statistics related context */
3228 stats_flags.collect_eth = 1;
3229
3230 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3231 ((u32 *)&stats_flags)[0]);
3232 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3233 ((u32 *)&stats_flags)[1]);
3234
3235 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3236 ((u32 *)&stats_flags)[0]);
3237 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3238 ((u32 *)&stats_flags)[1]);
3239
3240 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3241 ((u32 *)&stats_flags)[0]);
3242 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3243 ((u32 *)&stats_flags)[1]);
3244
3245 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3246 ((u32 *)&stats_flags)[0]);
3247 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3248 ((u32 *)&stats_flags)[1]);
3249
3250 REG_WR(bp, BAR_XSTRORM_INTMEM +
3251 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3252 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3253 REG_WR(bp, BAR_XSTRORM_INTMEM +
3254 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3255 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3256
3257 REG_WR(bp, BAR_TSTRORM_INTMEM +
3258 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3259 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3260 REG_WR(bp, BAR_TSTRORM_INTMEM +
3261 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3262 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3263
3264 REG_WR(bp, BAR_USTRORM_INTMEM +
3265 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3266 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3267 REG_WR(bp, BAR_USTRORM_INTMEM +
3268 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3269 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3270
3271 if (CHIP_IS_E1H(bp)) {
3272 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3273 IS_E1HMF(bp));
3274 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3275 IS_E1HMF(bp));
3276 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3277 IS_E1HMF(bp));
3278 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3279 IS_E1HMF(bp));
3280
3281 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3282 bp->e1hov);
3283 }
3284
3285 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
3286 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3287 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3288 for_each_queue(bp, i) {
3289 struct bnx2x_fastpath *fp = &bp->fp[i];
3290
3291 REG_WR(bp, BAR_USTRORM_INTMEM +
3292 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3293 U64_LO(fp->rx_comp_mapping));
3294 REG_WR(bp, BAR_USTRORM_INTMEM +
3295 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3296 U64_HI(fp->rx_comp_mapping));
3297
3298 /* Next page */
3299 REG_WR(bp, BAR_USTRORM_INTMEM +
3300 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3301 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3302 REG_WR(bp, BAR_USTRORM_INTMEM +
3303 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3304 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3305
3306 REG_WR16(bp, BAR_USTRORM_INTMEM +
3307 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3308 max_agg_size);
3309 }
3310
3311 /* dropless flow control */
3312 if (CHIP_IS_E1H(bp)) {
3313 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3314
3315 rx_pause.bd_thr_low = 250;
3316 rx_pause.cqe_thr_low = 250;
3317 rx_pause.cos = 1;
3318 rx_pause.sge_thr_low = 0;
3319 rx_pause.bd_thr_high = 350;
3320 rx_pause.cqe_thr_high = 350;
3321 rx_pause.sge_thr_high = 0;
3322
3323 for_each_queue(bp, i) {
3324 struct bnx2x_fastpath *fp = &bp->fp[i];
3325
3326 if (!fp->disable_tpa) {
3327 rx_pause.sge_thr_low = 150;
3328 rx_pause.sge_thr_high = 250;
3329 }
3330
3331
3332 offset = BAR_USTRORM_INTMEM +
3333 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3334 fp->cl_id);
3335 for (j = 0;
3336 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3337 j++)
3338 REG_WR(bp, offset + j*4,
3339 ((u32 *)&rx_pause)[j]);
3340 }
3341 }
3342
3343 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3344
3345 /* Init rate shaping and fairness contexts */
3346 if (IS_E1HMF(bp)) {
3347 int vn;
3348
3349 /* During init there is no active link;
3350 until link is up, set the link rate to 10Gbps */
3351 bp->link_vars.line_speed = SPEED_10000;
3352 bnx2x_init_port_minmax(bp);
3353
b015e3d1
EG
3354 if (!BP_NOMCP(bp))
3355 bp->mf_config =
3356 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
3357 bnx2x_calc_vn_weight_sum(bp);
3358
3359 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3360 bnx2x_init_vn_minmax(bp, 2*vn + port);
3361
3362 /* Enable rate shaping and fairness */
b015e3d1 3363 bp->cmng.flags.cmng_enables |=
8a1c38d1 3364 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 3365
8a1c38d1
EG
3366 } else {
3367 /* rate shaping and fairness are disabled */
3368 DP(NETIF_MSG_IFUP,
3369 "single function mode minmax will be disabled\n");
3370 }
3371
3372
cdaa7cb8 3373 /* Store cmng structures to internal memory */
8a1c38d1
EG
3374 if (bp->port.pmf)
3375 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3376 REG_WR(bp, BAR_XSTRORM_INTMEM +
3377 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3378 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
3379}
3380
471de716
EG
3381static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3382{
3383 switch (load_code) {
3384 case FW_MSG_CODE_DRV_LOAD_COMMON:
3385 bnx2x_init_internal_common(bp);
3386 /* no break */
3387
3388 case FW_MSG_CODE_DRV_LOAD_PORT:
3389 bnx2x_init_internal_port(bp);
3390 /* no break */
3391
3392 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3393 bnx2x_init_internal_func(bp);
3394 break;
3395
3396 default:
3397 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3398 break;
3399 }
3400}
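/*
 * A minimal userspace sketch of the cascading-init idiom used above:
 * the deliberate fallthrough ("no break") makes a COMMON load also run
 * the PORT and FUNCTION stages, a PORT load run PORT and FUNCTION, and
 * a FUNCTION load run only its own stage.  The enum and init_* stubs
 * here are illustrative only, not driver API.
 */
#include <stdio.h>

enum load_stage { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_common(void)   { puts("common init"); }
static void init_port(void)     { puts("port init"); }
static void init_function(void) { puts("function init"); }

static void staged_init(enum load_stage stage)
{
	switch (stage) {
	case LOAD_COMMON:
		init_common();
		/* no break - fall through to the port stage */
	case LOAD_PORT:
		init_port();
		/* no break - fall through to the function stage */
	case LOAD_FUNCTION:
		init_function();
		break;
	}
}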
3401
9f6c9258 3402void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
3403{
3404 int i;
3405
3406 for_each_queue(bp, i) {
3407 struct bnx2x_fastpath *fp = &bp->fp[i];
3408
34f80b04 3409 fp->bp = bp;
a2fbb9ea 3410 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 3411 fp->index = i;
34f80b04 3412 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
3413#ifdef BCM_CNIC
3414 fp->sb_id = fp->cl_id + 1;
3415#else
34f80b04 3416 fp->sb_id = fp->cl_id;
37b091ba 3417#endif
34f80b04 3418 DP(NETIF_MSG_IFUP,
f5372251
EG
3419 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3420 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 3421 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 3422 fp->sb_id);
5c862848 3423 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
3424 }
3425
16119785
EG
3426 /* ensure status block indices were read */
3427 rmb();
3428
3429
5c862848
EG
3430 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3431 DEF_SB_ID);
3432 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
3433 bnx2x_update_coalesce(bp);
3434 bnx2x_init_rx_rings(bp);
3435 bnx2x_init_tx_ring(bp);
3436 bnx2x_init_sp_ring(bp);
3437 bnx2x_init_context(bp);
471de716 3438 bnx2x_init_internal(bp, load_code);
a2fbb9ea 3439 bnx2x_init_ind_table(bp);
0ef00459
EG
3440 bnx2x_stats_init(bp);
3441
3442 /* At this point, we are ready for interrupts */
3443 atomic_set(&bp->intr_sem, 0);
3444
3445 /* flush all before enabling interrupts */
3446 mb();
3447 mmiowb();
3448
615f8fd9 3449 bnx2x_int_enable(bp);
eb8da205
EG
3450
3451 /* Check for SPIO5 */
3452 bnx2x_attn_int_deasserted0(bp,
3453 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3454 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
3455}
3456
3457/* end of nic init */
3458
3459/*
3460 * gzip service functions
3461 */
3462
3463static int bnx2x_gunzip_init(struct bnx2x *bp)
3464{
1a983142
FT
3465 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3466 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
3467 if (bp->gunzip_buf == NULL)
3468 goto gunzip_nomem1;
3469
3470 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3471 if (bp->strm == NULL)
3472 goto gunzip_nomem2;
3473
3474 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3475 GFP_KERNEL);
3476 if (bp->strm->workspace == NULL)
3477 goto gunzip_nomem3;
3478
3479 return 0;
3480
3481gunzip_nomem3:
3482 kfree(bp->strm);
3483 bp->strm = NULL;
3484
3485gunzip_nomem2:
1a983142
FT
3486 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3487 bp->gunzip_mapping);
a2fbb9ea
ET
3488 bp->gunzip_buf = NULL;
3489
3490gunzip_nomem1:
cdaa7cb8
VZ
3491 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3492 " decompression\n");
a2fbb9ea
ET
3493 return -ENOMEM;
3494}
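/*
 * bnx2x_gunzip_init() above follows the standard goto-unwind pattern:
 * each failed allocation jumps to a label that releases only what was
 * already allocated, in reverse order.  A hedged, generic userspace
 * analogue (names and sizes are made up for illustration):
 */
#include <stdlib.h>
#include <errno.h>

struct three_bufs { void *a, *b, *c; };

static int three_bufs_alloc(struct three_bufs *t)
{
	t->a = malloc(64);
	if (t->a == NULL)
		goto nomem1;

	t->b = malloc(64);
	if (t->b == NULL)
		goto nomem2;

	t->c = malloc(64);
	if (t->c == NULL)
		goto nomem3;

	return 0;

nomem3:
	free(t->b);
	t->b = NULL;
nomem2:
	free(t->a);
	t->a = NULL;
nomem1:
	return -ENOMEM;
}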
3495
3496static void bnx2x_gunzip_end(struct bnx2x *bp)
3497{
3498 kfree(bp->strm->workspace);
3499
3500 kfree(bp->strm);
3501 bp->strm = NULL;
3502
3503 if (bp->gunzip_buf) {
1a983142
FT
3504 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3505 bp->gunzip_mapping);
a2fbb9ea
ET
3506 bp->gunzip_buf = NULL;
3507 }
3508}
3509
94a78b79 3510static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
3511{
3512 int n, rc;
3513
3514 /* check gzip header */
94a78b79
VZ
3515 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3516 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 3517 return -EINVAL;
94a78b79 3518 }
a2fbb9ea
ET
3519
3520 n = 10; /* length of the fixed gzip header */
3521
34f80b04 3522#define FNAME 0x8
a2fbb9ea
ET
3523
3524 if (zbuf[3] & FNAME) /* skip the NUL-terminated original file name */
3525 while ((zbuf[n++] != 0) && (n < len));
3526
94a78b79 3527 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
3528 bp->strm->avail_in = len - n;
3529 bp->strm->next_out = bp->gunzip_buf;
3530 bp->strm->avail_out = FW_BUF_SIZE;
3531
3532 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3533 if (rc != Z_OK)
3534 return rc;
3535
3536 rc = zlib_inflate(bp->strm, Z_FINISH);
3537 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
3538 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3539 bp->strm->msg);
a2fbb9ea
ET
3540
3541 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3542 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
3543 netdev_err(bp->dev, "Firmware decompression error:"
3544 " gunzip_outlen (%d) not aligned\n",
3545 bp->gunzip_outlen);
a2fbb9ea
ET
3546 bp->gunzip_outlen >>= 2;
3547
3548 zlib_inflateEnd(bp->strm);
3549
3550 if (rc == Z_STREAM_END)
3551 return 0;
3552
3553 return rc;
3554}
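/*
 * A userspace analogue of bnx2x_gunzip() above, assuming plain zlib:
 * the fixed 10-byte gzip header (plus the optional NUL-terminated file
 * name when the FNAME flag is set) is skipped by hand, and the raw
 * deflate payload is inflated with negative windowBits so zlib does
 * not expect a header of its own.  Sketch only, minimal error checks.
 */
#include <string.h>
#include <zlib.h>

static int gunzip_raw(const unsigned char *zbuf, unsigned int len,
		      unsigned char *out, unsigned int out_len)
{
	z_stream strm;
	unsigned int n = 10;			/* fixed gzip header */
	int rc;

	if (len < n || zbuf[0] != 0x1f || zbuf[1] != 0x8b)
		return -1;			/* bad gzip magic */

	if (zbuf[3] & 0x08)			/* FNAME flag */
		while (n < len && zbuf[n++] != 0)
			;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (unsigned char *)zbuf + n;
	strm.avail_in = len - n;
	strm.next_out = out;
	strm.avail_out = out_len;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
		return -1;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);

	return (rc == Z_STREAM_END) ? 0 : -1;
}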
3555
3556/* nic load/unload */
3557
3558/*
34f80b04 3559 * General service functions
a2fbb9ea
ET
3560 */
3561
3562/* send a NIG loopback debug packet */
3563static void bnx2x_lb_pckt(struct bnx2x *bp)
3564{
a2fbb9ea 3565 u32 wb_write[3];
a2fbb9ea
ET
3566
3567 /* Ethernet source and destination addresses */
a2fbb9ea
ET
3568 wb_write[0] = 0x55555555;
3569 wb_write[1] = 0x55555555;
34f80b04 3570 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 3571 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3572
3573 /* NON-IP protocol */
a2fbb9ea
ET
3574 wb_write[0] = 0x09000000;
3575 wb_write[1] = 0x55555555;
34f80b04 3576 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 3577 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
3578}
3579
3580/* some of the internal memories
3581 * are not directly readable from the driver;
3582 * to test them we send debug packets
3583 */
3584static int bnx2x_int_mem_test(struct bnx2x *bp)
3585{
3586 int factor;
3587 int count, i;
3588 u32 val = 0;
3589
ad8d3948 3590 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 3591 factor = 120;
ad8d3948
EG
3592 else if (CHIP_REV_IS_EMUL(bp))
3593 factor = 200;
3594 else
a2fbb9ea 3595 factor = 1;
a2fbb9ea
ET
3596
3597 DP(NETIF_MSG_HW, "start part1\n");
3598
3599 /* Disable inputs of parser neighbor blocks */
3600 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3601 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3602 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3603 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3604
3605 /* Write 0 to parser credits for CFC search request */
3606 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3607
3608 /* send Ethernet packet */
3609 bnx2x_lb_pckt(bp);
3610
3611 /* TODO: should the NIG statistics be reset here? */
3612 /* Wait until NIG register shows 1 packet of size 0x10 */
3613 count = 1000 * factor;
3614 while (count) {
34f80b04 3615
a2fbb9ea
ET
3616 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3617 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3618 if (val == 0x10)
3619 break;
3620
3621 msleep(10);
3622 count--;
3623 }
3624 if (val != 0x10) {
3625 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3626 return -1;
3627 }
3628
3629 /* Wait until PRS register shows 1 packet */
3630 count = 1000 * factor;
3631 while (count) {
3632 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
3633 if (val == 1)
3634 break;
3635
3636 msleep(10);
3637 count--;
3638 }
3639 if (val != 0x1) {
3640 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3641 return -2;
3642 }
3643
3644 /* Reset and init BRB, PRS */
34f80b04 3645 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 3646 msleep(50);
34f80b04 3647 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 3648 msleep(50);
94a78b79
VZ
3649 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3650 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
3651
3652 DP(NETIF_MSG_HW, "part2\n");
3653
3654 /* Disable inputs of parser neighbor blocks */
3655 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3656 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3657 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 3658 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
3659
3660 /* Write 0 to parser credits for CFC search request */
3661 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3662
3663 /* send 10 Ethernet packets */
3664 for (i = 0; i < 10; i++)
3665 bnx2x_lb_pckt(bp);
3666
3667 /* Wait until NIG register shows 10 + 1
3668 packets of size 11*0x10 = 0xb0 */
3669 count = 1000 * factor;
3670 while (count) {
34f80b04 3671
a2fbb9ea
ET
3672 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3673 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
3674 if (val == 0xb0)
3675 break;
3676
3677 msleep(10);
3678 count--;
3679 }
3680 if (val != 0xb0) {
3681 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3682 return -3;
3683 }
3684
3685 /* Wait until PRS register shows 2 packets */
3686 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3687 if (val != 2)
3688 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3689
3690 /* Write 1 to parser credits for CFC search request */
3691 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3692
3693 /* Wait until PRS register shows 3 packets */
3694 msleep(10 * factor);
3695 /* Wait until NIG register shows 1 packet of size 0x10 */
3696 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3697 if (val != 3)
3698 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3699
3700 /* clear NIG EOP FIFO */
3701 for (i = 0; i < 11; i++)
3702 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3703 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3704 if (val != 1) {
3705 BNX2X_ERR("clear of NIG failed\n");
3706 return -4;
3707 }
3708
3709 /* Reset and init BRB, PRS, NIG */
3710 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3711 msleep(50);
3712 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3713 msleep(50);
94a78b79
VZ
3714 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3715 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 3716#ifndef BCM_CNIC
a2fbb9ea
ET
3717 /* set NIC mode */
3718 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3719#endif
3720
3721 /* Enable inputs of parser neighbor blocks */
3722 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3723 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3724 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 3725 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
3726
3727 DP(NETIF_MSG_HW, "done\n");
3728
3729 return 0; /* OK */
3730}
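/*
 * All of the waits in bnx2x_int_mem_test() share one shape: poll a
 * register, sleep 10 ms between reads, and scale the iteration budget
 * by "factor" so emulation/FPGA platforms get proportionally longer.
 * A generic sketch of that pattern; reg_read() and sleep_ms() are
 * hypothetical stand-ins for REG_RD()/msleep(), not driver API.
 */
#include <stdint.h>

extern uint32_t reg_read(uint32_t addr);	/* stand-in MMIO read */
extern void sleep_ms(unsigned int ms);		/* stand-in for msleep() */

static int poll_reg_eq(uint32_t addr, uint32_t want, int factor)
{
	int count = 1000 * factor;

	while (count--) {
		if (reg_read(addr) == want)
			return 0;
		sleep_ms(10);
	}
	return -1;				/* timed out */
}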
3731
3732static void enable_blocks_attention(struct bnx2x *bp)
3733{
3734 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3735 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3736 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3737 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3738 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3739 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3740 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3741 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3742 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
3743/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3744/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3745 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3746 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3747 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
3748/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3749/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3750 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3751 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3752 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3753 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
3754/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3755/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3756 if (CHIP_REV_IS_FPGA(bp))
3757 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3758 else
3759 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
3760 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3761 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3762 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
3763/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3764/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
3765 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3766 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
3767/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3768 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
3769}
3770
72fd0718
VZ
3771static const struct {
3772 u32 addr;
3773 u32 mask;
3774} bnx2x_parity_mask[] = {
3775 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3776 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3777 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3778 {HC_REG_HC_PRTY_MASK, 0xffffffff},
3779 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3780 {QM_REG_QM_PRTY_MASK, 0x0},
3781 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3782 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3783 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3784 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3785 {CDU_REG_CDU_PRTY_MASK, 0x0},
3786 {CFC_REG_CFC_PRTY_MASK, 0x0},
3787 {DBG_REG_DBG_PRTY_MASK, 0x0},
3788 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3789 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3790 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3791 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3792 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3793 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3794 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3795 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3796 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3797 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3798 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3799 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3800 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3801 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3802 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3803};
3804
3805static void enable_blocks_parity(struct bnx2x *bp)
3806{
3807 int i, mask_arr_len =
3808 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3809
3810 for (i = 0; i < mask_arr_len; i++)
3811 REG_WR(bp, bnx2x_parity_mask[i].addr,
3812 bnx2x_parity_mask[i].mask);
3813}
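/*
 * Note: the open-coded sizeof division above is what the kernel's
 * ARRAY_SIZE() macro computes, i.e.
 *
 *	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
 *
 * and the loop is the usual table-driven way to apply a long list of
 * (register, mask) pairs.
 */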
3814
34f80b04 3815
81f75bbf
EG
3816static void bnx2x_reset_common(struct bnx2x *bp)
3817{
3818 /* reset_common */
3819 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3820 0xd3ffff7f);
3821 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3822}
3823
573f2035
EG
3824static void bnx2x_init_pxp(struct bnx2x *bp)
3825{
3826 u16 devctl;
3827 int r_order, w_order;
3828
3829 pci_read_config_word(bp->pdev,
3830 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3831 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3832 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3833 if (bp->mrrs == -1)
3834 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3835 else {
3836 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3837 r_order = bp->mrrs;
3838 }
3839
3840 bnx2x_init_pxp_arb(bp, r_order, w_order);
3841}
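/*
 * Worked example of the DEVCTL decoding above: PCI_EXP_DEVCTL_PAYLOAD
 * is bits 7:5 and PCI_EXP_DEVCTL_READRQ is bits 14:12 of the PCIe
 * Device Control register, each encoding a size of 128 << field bytes.
 * A (hypothetical) devctl of 0x2810 therefore gives
 * w_order = (0x2810 & 0x00e0) >> 5 = 0 (128-byte max payload) and
 * r_order = (0x2810 & 0x7000) >> 12 = 2 (512-byte max read request).
 */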
fd4ef40d
EG
3842
3843static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3844{
2145a920 3845 int is_required;
fd4ef40d 3846 u32 val;
2145a920 3847 int port;
fd4ef40d 3848
2145a920
VZ
3849 if (BP_NOMCP(bp))
3850 return;
3851
3852 is_required = 0;
fd4ef40d
EG
3853 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3854 SHARED_HW_CFG_FAN_FAILURE_MASK;
3855
3856 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3857 is_required = 1;
3858
3859 /*
3860 * The fan failure mechanism is usually related to the PHY type since
3861 * the power consumption of the board is affected by the PHY. Currently,
3862 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3863 */
3864 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3865 for (port = PORT_0; port < PORT_MAX; port++) {
3866 u32 phy_type =
3867 SHMEM_RD(bp, dev_info.port_hw_config[port].
3868 external_phy_config) &
3869 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3870 is_required |=
3871 ((phy_type ==
3872 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
3873 (phy_type ==
3874 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
3875 (phy_type ==
3876 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3877 }
3878
3879 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3880
3881 if (is_required == 0)
3882 return;
3883
3884 /* Fan failure is indicated by SPIO 5 */
3885 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3886 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3887
3888 /* set to active low mode */
3889 val = REG_RD(bp, MISC_REG_SPIO_INT);
3890 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 3891 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
3892 REG_WR(bp, MISC_REG_SPIO_INT, val);
3893
3894 /* enable interrupt to signal the IGU */
3895 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3896 val |= (1 << MISC_REGISTERS_SPIO_5);
3897 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3898}
3899
34f80b04 3900static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 3901{
a2fbb9ea 3902 u32 val, i;
37b091ba
MC
3903#ifdef BCM_CNIC
3904 u32 wb_write[2];
3905#endif
a2fbb9ea 3906
34f80b04 3907 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 3908
81f75bbf 3909 bnx2x_reset_common(bp);
34f80b04
EG
3910 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3911 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 3912
94a78b79 3913 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
3914 if (CHIP_IS_E1H(bp))
3915 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 3916
34f80b04
EG
3917 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3918 msleep(30);
3919 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 3920
94a78b79 3921 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
3922 if (CHIP_IS_E1(bp)) {
3923 /* enable HW interrupt from PXP on USDM overflow,
3924 i.e. bit 16 on INT_MASK_0 */
3925 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3926 }
a2fbb9ea 3927
94a78b79 3928 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 3929 bnx2x_init_pxp(bp);
a2fbb9ea
ET
3930
3931#ifdef __BIG_ENDIAN
34f80b04
EG
3932 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3933 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3934 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3935 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3936 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
3937 /* make sure this value is 0 */
3938 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
3939
3940/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3941 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3942 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3943 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3944 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
3945#endif
3946
34f80b04 3947 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 3948#ifdef BCM_CNIC
34f80b04
EG
3949 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3950 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3951 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
3952#endif
3953
34f80b04
EG
3954 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3955 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 3956
34f80b04
EG
3957 /* let the HW do its magic ... */
3958 msleep(100);
3959 /* finish PXP init */
3960 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3961 if (val != 1) {
3962 BNX2X_ERR("PXP2 CFG failed\n");
3963 return -EBUSY;
3964 }
3965 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3966 if (val != 1) {
3967 BNX2X_ERR("PXP2 RD_INIT failed\n");
3968 return -EBUSY;
3969 }
a2fbb9ea 3970
34f80b04
EG
3971 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3972 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 3973
94a78b79 3974 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 3975
34f80b04
EG
3976 /* clean the DMAE memory */
3977 bp->dmae_ready = 1;
3978 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 3979
94a78b79
VZ
3980 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3981 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3982 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3983 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 3984
34f80b04
EG
3985 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3986 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3987 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3988 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3989
94a78b79 3990 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
3991
3992#ifdef BCM_CNIC
3993 wb_write[0] = 0;
3994 wb_write[1] = 0;
3995 for (i = 0; i < 64; i++) {
3996 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3997 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3998
3999 if (CHIP_IS_E1H(bp)) {
4000 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4001 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4002 wb_write, 2);
4003 }
4004 }
4005#endif
34f80b04
EG
4006 /* soft reset pulse */
4007 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4008 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 4009
37b091ba 4010#ifdef BCM_CNIC
94a78b79 4011 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 4012#endif
a2fbb9ea 4013
94a78b79 4014 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
4015 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4016 if (!CHIP_REV_IS_SLOW(bp)) {
4017 /* enable hw interrupt from doorbell Q */
4018 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4019 }
a2fbb9ea 4020
94a78b79
VZ
4021 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4022 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 4023 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 4024#ifndef BCM_CNIC
3196a88a
EG
4025 /* set NIC mode */
4026 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 4027#endif
34f80b04
EG
4028 if (CHIP_IS_E1H(bp))
4029 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 4030
94a78b79
VZ
4031 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4032 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4033 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4034 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 4035
ca00392c
EG
4036 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4039 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 4040
94a78b79
VZ
4041 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4042 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4043 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4044 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 4045
34f80b04
EG
4046 /* sync semi rtc */
4047 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4048 0x80000000);
4049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4050 0x80000000);
a2fbb9ea 4051
94a78b79
VZ
4052 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4053 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4054 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 4055
34f80b04 4056 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
4057 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4058 REG_WR(bp, i, random32());
94a78b79 4059 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
4060#ifdef BCM_CNIC
4061 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4062 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4063 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4064 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4065 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4066 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4067 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4068 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4069 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4070 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4071#endif
34f80b04 4072 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 4073
34f80b04
EG
4074 if (sizeof(union cdu_context) != 1024)
4075 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
4076 dev_alert(&bp->pdev->dev, "please adjust the size "
4077 "of cdu_context(%ld)\n",
7995c64e 4078 (long)sizeof(union cdu_context));
a2fbb9ea 4079
94a78b79 4080 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
4081 val = (4 << 24) + (0 << 12) + 1024;
4082 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 4083
94a78b79 4084 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 4085 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
4086 /* enable context validation interrupt from CFC */
4087 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4088
4089 /* set the thresholds to prevent CFC/CDU race */
4090 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 4091
94a78b79
VZ
4092 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4093 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 4094
94a78b79 4095 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
4096 /* Reset PCIE errors for debug */
4097 REG_WR(bp, 0x2814, 0xffffffff);
4098 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 4099
94a78b79 4100 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 4101 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 4102 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 4103 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 4104
94a78b79 4105 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
4106 if (CHIP_IS_E1H(bp)) {
4107 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4108 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4109 }
4110
4111 if (CHIP_REV_IS_SLOW(bp))
4112 msleep(200);
4113
4114 /* finish CFC init */
4115 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4116 if (val != 1) {
4117 BNX2X_ERR("CFC LL_INIT failed\n");
4118 return -EBUSY;
4119 }
4120 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4121 if (val != 1) {
4122 BNX2X_ERR("CFC AC_INIT failed\n");
4123 return -EBUSY;
4124 }
4125 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4126 if (val != 1) {
4127 BNX2X_ERR("CFC CAM_INIT failed\n");
4128 return -EBUSY;
4129 }
4130 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 4131
34f80b04
EG
4132 /* read NIG statistic
4133 to see if this is our first bring-up since power-up */
4134 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4135 val = *bnx2x_sp(bp, wb_data[0]);
4136
4137 /* do internal memory self test */
4138 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4139 BNX2X_ERR("internal mem self test failed\n");
4140 return -EBUSY;
4141 }
4142
35b19ba5 4143 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
4144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 4147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
4148 bp->port.need_hw_lock = 1;
4149 break;
4150
34f80b04
EG
4151 default:
4152 break;
4153 }
f1410647 4154
fd4ef40d
EG
4155 bnx2x_setup_fan_failure_detection(bp);
4156
34f80b04
EG
4157 /* clear PXP2 attentions */
4158 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 4159
34f80b04 4160 enable_blocks_attention(bp);
72fd0718
VZ
4161 if (CHIP_PARITY_SUPPORTED(bp))
4162 enable_blocks_parity(bp);
a2fbb9ea 4163
6bbca910
YR
4164 if (!BP_NOMCP(bp)) {
4165 bnx2x_acquire_phy_lock(bp);
4166 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4167 bnx2x_release_phy_lock(bp);
4168 } else
4169 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4170
34f80b04
EG
4171 return 0;
4172}
a2fbb9ea 4173
34f80b04
EG
4174static int bnx2x_init_port(struct bnx2x *bp)
4175{
4176 int port = BP_PORT(bp);
94a78b79 4177 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 4178 u32 low, high;
34f80b04 4179 u32 val;
a2fbb9ea 4180
cdaa7cb8 4181 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
4182
4183 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 4184
94a78b79 4185 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 4186 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
4187
4188 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4189 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4190 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 4191 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 4192
37b091ba
MC
4193#ifdef BCM_CNIC
4194 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 4195
94a78b79 4196 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
4197 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4198 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 4199#endif
cdaa7cb8 4200
94a78b79 4201 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 4202
94a78b79 4203 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
4204 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4205 /* no pause for emulation and FPGA */
4206 low = 0;
4207 high = 513;
4208 } else {
4209 if (IS_E1HMF(bp))
4210 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4211 else if (bp->dev->mtu > 4096) {
4212 if (bp->flags & ONE_PORT_FLAG)
4213 low = 160;
4214 else {
4215 val = bp->dev->mtu;
4216 /* (24*1024 + val*4)/256 */
4217 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4218 }
4219 } else
4220 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4221 high = low + 56; /* 14*1024/256 */
4222 }
4223 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4224 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4225
4226
94a78b79 4227 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 4228
94a78b79 4229 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 4230 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 4231 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 4232 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 4233
94a78b79
VZ
4234 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4235 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4236 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4237 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 4238
94a78b79 4239 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 4240 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 4241
94a78b79 4242 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
4243
4244 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 4245 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
4246
4247 /* update threshold */
34f80b04 4248 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 4249 /* update init credit */
34f80b04 4250 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
4251
4252 /* probe changes */
34f80b04 4253 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 4254 msleep(5);
34f80b04 4255 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 4256
37b091ba
MC
4257#ifdef BCM_CNIC
4258 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 4259#endif
94a78b79 4260 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 4261 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
4262
4263 if (CHIP_IS_E1(bp)) {
4264 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4265 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4266 }
94a78b79 4267 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 4268
94a78b79 4269 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
4270 /* init aeu_mask_attn_func_0/1:
4271 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4272 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4273 * bits 4-7 are used for "per vn group attention" */
4274 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4275 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4276
94a78b79 4277 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 4278 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 4279 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 4280 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 4281 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 4282
94a78b79 4283 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
4284
4285 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4286
4287 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
4288 /* 0x2 disable e1hov, 0x1 enable */
4289 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4290 (IS_E1HMF(bp) ? 0x1 : 0x2));
4291
1c06328c
EG
4292 {
4293 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4294 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4295 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4296 }
34f80b04
EG
4297 }
4298
94a78b79 4299 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 4300 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 4301
35b19ba5 4302 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
4303 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4304 {
4305 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4306
4307 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4308 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4309
4310 /* The GPIO should be swapped if the swap register is
4311 set and active */
4312 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4313 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4314
4315 /* Select function upon port-swap configuration */
4316 if (port == 0) {
4317 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4318 aeu_gpio_mask = (swap_val && swap_override) ?
4319 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4320 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4321 } else {
4322 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4323 aeu_gpio_mask = (swap_val && swap_override) ?
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4325 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4326 }
4327 val = REG_RD(bp, offset);
4328 /* add GPIO3 to group */
4329 val |= aeu_gpio_mask;
4330 REG_WR(bp, offset, val);
4331 }
3971a230 4332 bp->port.need_hw_lock = 1;
589abe3a
EG
4333 break;
4334
4d295db0 4335 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3971a230
YR
4336 bp->port.need_hw_lock = 1;
4337 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647 4338 /* add SPIO 5 to group 0 */
4d295db0
EG
4339 {
4340 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4341 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4342 val = REG_RD(bp, reg_addr);
f1410647 4343 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
4344 REG_WR(bp, reg_addr, val);
4345 }
f1410647 4346 break;
3971a230
YR
4347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4348 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4349 bp->port.need_hw_lock = 1;
4350 break;
f1410647
ET
4351 default:
4352 break;
4353 }
c18487ee 4354 bnx2x__link_reset(bp);
a2fbb9ea 4355
34f80b04
EG
4356 return 0;
4357}
4358
4359#define ILT_PER_FUNC (768/2)
4360#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4361/* the physical address is shifted right 12 bits and a valid bit
4362 (1) is set at the 53rd bit;
4363 then, since this is a wide register(TM),
4364 we split it into two 32-bit writes
4365 */
4366#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4367#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4368#define PXP_ONE_ILT(x) (((x) << 10) | x)
4369#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4370
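/*
 * Worked example of the ONCHIP_ADDR macros: for a (hypothetical) DMA
 * address x = 0x0000001234567000, (u64)x >> 12 = 0x1234567, so
 * ONCHIP_ADDR1(x) = 0x01234567 is the low 32 bits of the ILT entry and
 * ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44) = 0x00100000 is the high 32
 * bits, bit 20 of which is the valid bit, i.e. bit 52 of the whole
 * 64-bit entry.
 */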
37b091ba
MC
4371#ifdef BCM_CNIC
4372#define CNIC_ILT_LINES 127
4373#define CNIC_CTX_PER_ILT 16
4374#else
34f80b04 4375#define CNIC_ILT_LINES 0
37b091ba 4376#endif
34f80b04
EG
4377
4378static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4379{
4380 int reg;
4381
4382 if (CHIP_IS_E1H(bp))
4383 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4384 else /* E1 */
4385 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4386
4387 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4388}
4389
4390static int bnx2x_init_func(struct bnx2x *bp)
4391{
4392 int port = BP_PORT(bp);
4393 int func = BP_FUNC(bp);
8badd27a 4394 u32 addr, val;
34f80b04
EG
4395 int i;
4396
cdaa7cb8 4397 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 4398
8badd27a
EG
4399 /* set MSI reconfigure capability */
4400 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4401 val = REG_RD(bp, addr);
4402 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4403 REG_WR(bp, addr, val);
4404
34f80b04
EG
4405 i = FUNC_ILT_BASE(func);
4406
4407 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4408 if (CHIP_IS_E1H(bp)) {
4409 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4410 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4411 } else /* E1 */
4412 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4413 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4414
37b091ba
MC
4415#ifdef BCM_CNIC
4416 i += 1 + CNIC_ILT_LINES;
4417 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4418 if (CHIP_IS_E1(bp))
4419 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4420 else {
4421 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4422 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4423 }
4424
4425 i++;
4426 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4427 if (CHIP_IS_E1(bp))
4428 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4429 else {
4430 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4431 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4432 }
4433
4434 i++;
4435 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4436 if (CHIP_IS_E1(bp))
4437 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4438 else {
4439 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4440 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4441 }
4442
4443 /* tell the searcher where the T2 table is */
4444 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4445
4446 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4447 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4448
4449 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4450 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4451 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4452
4453 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4454#endif
34f80b04
EG
4455
4456 if (CHIP_IS_E1H(bp)) {
573f2035
EG
4457 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4458 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4459 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4460 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4461 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4462 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4463 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4464 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4465 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
4466
4467 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4468 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4469 }
4470
4471 /* HC init per function */
4472 if (CHIP_IS_E1H(bp)) {
4473 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4474
4475 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4476 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4477 }
94a78b79 4478 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 4479
c14423fe 4480 /* Reset PCIE errors for debug */
a2fbb9ea
ET
4481 REG_WR(bp, 0x2114, 0xffffffff);
4482 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 4483
34f80b04
EG
4484 return 0;
4485}
4486
9f6c9258 4487int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04
EG
4488{
4489 int i, rc = 0;
a2fbb9ea 4490
34f80b04
EG
4491 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4492 BP_FUNC(bp), load_code);
a2fbb9ea 4493
34f80b04
EG
4494 bp->dmae_ready = 0;
4495 mutex_init(&bp->dmae_mutex);
54016b26
EG
4496 rc = bnx2x_gunzip_init(bp);
4497 if (rc)
4498 return rc;
a2fbb9ea 4499
34f80b04
EG
4500 switch (load_code) {
4501 case FW_MSG_CODE_DRV_LOAD_COMMON:
4502 rc = bnx2x_init_common(bp);
4503 if (rc)
4504 goto init_hw_err;
4505 /* no break */
4506
4507 case FW_MSG_CODE_DRV_LOAD_PORT:
4508 bp->dmae_ready = 1;
4509 rc = bnx2x_init_port(bp);
4510 if (rc)
4511 goto init_hw_err;
4512 /* no break */
4513
4514 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4515 bp->dmae_ready = 1;
4516 rc = bnx2x_init_func(bp);
4517 if (rc)
4518 goto init_hw_err;
4519 break;
4520
4521 default:
4522 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4523 break;
4524 }
4525
4526 if (!BP_NOMCP(bp)) {
4527 int func = BP_FUNC(bp);
a2fbb9ea
ET
4528
4529 bp->fw_drv_pulse_wr_seq =
34f80b04 4530 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 4531 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
4532 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4533 }
a2fbb9ea 4534
34f80b04
EG
4535 /* this needs to be done before gunzip end */
4536 bnx2x_zero_def_sb(bp);
4537 for_each_queue(bp, i)
4538 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
4539#ifdef BCM_CNIC
4540 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4541#endif
34f80b04
EG
4542
4543init_hw_err:
4544 bnx2x_gunzip_end(bp);
4545
4546 return rc;
a2fbb9ea
ET
4547}
4548
9f6c9258 4549void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
4550{
4551
4552#define BNX2X_PCI_FREE(x, y, size) \
4553 do { \
4554 if (x) { \
1a983142 4555 dma_free_coherent(&bp->pdev->dev, size, x, y); \
a2fbb9ea
ET
4556 x = NULL; \
4557 y = 0; \
4558 } \
4559 } while (0)
4560
4561#define BNX2X_FREE(x) \
4562 do { \
4563 if (x) { \
4564 vfree(x); \
4565 x = NULL; \
4566 } \
4567 } while (0)
4568
4569 int i;
4570
4571 /* fastpath */
555f6c78 4572 /* Common */
a2fbb9ea
ET
4573 for_each_queue(bp, i) {
4574
555f6c78 4575 /* status blocks */
a2fbb9ea
ET
4576 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4577 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 4578 sizeof(struct host_status_block));
555f6c78
EG
4579 }
4580 /* Rx */
54b9ddaa 4581 for_each_queue(bp, i) {
a2fbb9ea 4582
555f6c78 4583 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
4584 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4585 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4586 bnx2x_fp(bp, i, rx_desc_mapping),
4587 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4588
4589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4590 bnx2x_fp(bp, i, rx_comp_mapping),
4591 sizeof(struct eth_fast_path_rx_cqe) *
4592 NUM_RCQ_BD);
a2fbb9ea 4593
7a9b2557 4594 /* SGE ring */
32626230 4595 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
4596 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4597 bnx2x_fp(bp, i, rx_sge_mapping),
4598 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4599 }
555f6c78 4600 /* Tx */
54b9ddaa 4601 for_each_queue(bp, i) {
555f6c78
EG
4602
4603 /* fastpath tx rings: tx_buf tx_desc */
4604 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4605 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4606 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 4607 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 4608 }
a2fbb9ea
ET
4609 /* end of fastpath */
4610
4611 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 4612 sizeof(struct host_def_status_block));
a2fbb9ea
ET
4613
4614 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 4615 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4616
37b091ba 4617#ifdef BCM_CNIC
a2fbb9ea
ET
4618 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4619 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4620 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4621 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
4622 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4623 sizeof(struct host_status_block));
a2fbb9ea 4624#endif
7a9b2557 4625 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
4626
4627#undef BNX2X_PCI_FREE
4628#undef BNX2X_FREE
4629}
4630
9f6c9258 4631int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea
ET
4632{
4633
4634#define BNX2X_PCI_ALLOC(x, y, size) \
4635 do { \
1a983142 4636 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
4637 if (x == NULL) \
4638 goto alloc_mem_err; \
4639 memset(x, 0, size); \
4640 } while (0)
a2fbb9ea 4641
9f6c9258
DK
4642#define BNX2X_ALLOC(x, size) \
4643 do { \
4644 x = vmalloc(size); \
4645 if (x == NULL) \
4646 goto alloc_mem_err; \
4647 memset(x, 0, size); \
4648 } while (0)
a2fbb9ea 4649
9f6c9258 4650 int i;
a2fbb9ea 4651
9f6c9258
DK
4652 /* fastpath */
4653 /* Common */
a2fbb9ea 4654 for_each_queue(bp, i) {
9f6c9258 4655 bnx2x_fp(bp, i, bp) = bp;
a2fbb9ea 4656
9f6c9258
DK
4657 /* status blocks */
4658 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4659 &bnx2x_fp(bp, i, status_blk_mapping),
4660 sizeof(struct host_status_block));
a2fbb9ea 4661 }
9f6c9258
DK
4662 /* Rx */
4663 for_each_queue(bp, i) {
a2fbb9ea 4664
9f6c9258
DK
4665 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4666 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4667 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4668 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4669 &bnx2x_fp(bp, i, rx_desc_mapping),
4670 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 4671
9f6c9258
DK
4672 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4673 &bnx2x_fp(bp, i, rx_comp_mapping),
4674 sizeof(struct eth_fast_path_rx_cqe) *
4675 NUM_RCQ_BD);
a2fbb9ea 4676
9f6c9258
DK
4677 /* SGE ring */
4678 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4679 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4680 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4681 &bnx2x_fp(bp, i, rx_sge_mapping),
4682 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4683 }
4684 /* Tx */
4685 for_each_queue(bp, i) {
8badd27a 4686
9f6c9258
DK
4687 /* fastpath tx rings: tx_buf tx_desc */
4688 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4689 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4690 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4691 &bnx2x_fp(bp, i, tx_desc_mapping),
4692 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 4693 }
9f6c9258 4694 /* end of fastpath */
8badd27a 4695
9f6c9258
DK
4696 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4697 sizeof(struct host_def_status_block));
8badd27a 4698
9f6c9258
DK
4699 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4700 sizeof(struct bnx2x_slowpath));
a2fbb9ea 4701
9f6c9258
DK
4702#ifdef BCM_CNIC
4703 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
8badd27a 4704
9f6c9258
DK
4705 /* allocate searcher T2 table;
4706 we allocate 1/4 of the allocation count for T2
4707 (which is not entered into the ILT) */
4708 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
a2fbb9ea 4709
9f6c9258
DK
4710 /* Initialize T2 (for 1024 connections) */
4711 for (i = 0; i < 16*1024; i += 64)
4712 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 4713
9f6c9258
DK
4714 /* Timer block array (8*MAX_CONN), phys uncached; for now 1024 conns */
4715 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
65abd74d 4716
9f6c9258
DK
4717 /* QM queues (128*MAX_CONN) */
4718 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
65abd74d 4719
9f6c9258
DK
4720 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4721 sizeof(struct host_status_block));
4722#endif
65abd74d 4723
9f6c9258
DK
4724 /* Slow path ring */
4725 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 4726
9f6c9258 4727 return 0;
e1510706 4728
9f6c9258
DK
4729alloc_mem_err:
4730 bnx2x_free_mem(bp);
4731 return -ENOMEM;
e1510706 4732
9f6c9258
DK
4733#undef BNX2X_PCI_ALLOC
4734#undef BNX2X_ALLOC
65abd74d
YG
4735}
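/*
 * Sketch of the alloc/free contract between bnx2x_alloc_mem() and
 * bnx2x_free_mem() above: any failed BNX2X_PCI_ALLOC()/BNX2X_ALLOC()
 * jumps to a single error label that frees *everything*, which is safe
 * only because each free macro tolerates NULL and re-NULLs the pointer.
 * A minimal userspace analogue (names are illustrative, not driver API):
 */
#include <stdlib.h>
#include <string.h>

#define XALLOC(x, size)				\
	do {					\
		(x) = malloc(size);		\
		if ((x) == NULL)		\
			goto alloc_err;		\
		memset((x), 0, (size));		\
	} while (0)

#define XFREE(x)				\
	do {					\
		free(x); /* free(NULL) is a no-op */	\
		(x) = NULL;			\
	} while (0)

struct two_bufs { void *a, *b; };	/* members assumed zeroed */

static void two_bufs_free(struct two_bufs *t)
{
	XFREE(t->a);
	XFREE(t->b);
}

static int two_bufs_alloc(struct two_bufs *t)
{
	XALLOC(t->a, 4096);
	XALLOC(t->b, 4096);
	return 0;

alloc_err:
	two_bufs_free(t);	/* safe: never-allocated members are NULL */
	return -1;
}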
4736
65abd74d 4737
a2fbb9ea
ET
4738/*
4739 * Init service functions
4740 */
4741
e665bfda
MC
4742/**
4743 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4744 *
4745 * @param bp driver descriptor
4746 * @param set set or clear an entry (1 or 0)
4747 * @param mac pointer to a buffer containing a MAC
4748 * @param cl_bit_vec bit vector of clients to register a MAC for
4749 * @param cam_offset offset in a CAM to use
4750 * @param with_bcast set broadcast MAC as well
4751 */
4752static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4753 u32 cl_bit_vec, u8 cam_offset,
4754 u8 with_bcast)
a2fbb9ea
ET
4755{
4756 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 4757 int port = BP_PORT(bp);
a2fbb9ea
ET
4758
4759 /* CAM allocation
4760 * unicasts 0-31:port0 32-63:port1
4761 * multicast 64-127:port0 128-191:port1
4762 */
e665bfda
MC
4763 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4764 config->hdr.offset = cam_offset;
4765 config->hdr.client_id = 0xff;
a2fbb9ea
ET
4766 config->hdr.reserved1 = 0;
4767
4768 /* primary MAC */
4769 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 4770 swab16(*(u16 *)&mac[0]);
a2fbb9ea 4771 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 4772 swab16(*(u16 *)&mac[2]);
a2fbb9ea 4773 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 4774 swab16(*(u16 *)&mac[4]);
34f80b04 4775 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
4776 if (set)
4777 config->config_table[0].target_table_entry.flags = 0;
4778 else
4779 CAM_INVALIDATE(config->config_table[0]);
ca00392c 4780 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 4781 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
4782 config->config_table[0].target_table_entry.vlan_id = 0;
4783
3101c2bc
YG
4784 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4785 (set ? "setting" : "clearing"),
a2fbb9ea
ET
4786 config->config_table[0].cam_entry.msb_mac_addr,
4787 config->config_table[0].cam_entry.middle_mac_addr,
4788 config->config_table[0].cam_entry.lsb_mac_addr);
4789
4790 /* broadcast */
e665bfda
MC
4791 if (with_bcast) {
4792 config->config_table[1].cam_entry.msb_mac_addr =
4793 cpu_to_le16(0xffff);
4794 config->config_table[1].cam_entry.middle_mac_addr =
4795 cpu_to_le16(0xffff);
4796 config->config_table[1].cam_entry.lsb_mac_addr =
4797 cpu_to_le16(0xffff);
4798 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4799 if (set)
4800 config->config_table[1].target_table_entry.flags =
4801 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4802 else
4803 CAM_INVALIDATE(config->config_table[1]);
4804 config->config_table[1].target_table_entry.clients_bit_vector =
4805 cpu_to_le32(cl_bit_vec);
4806 config->config_table[1].target_table_entry.vlan_id = 0;
4807 }
a2fbb9ea
ET
4808
4809 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4810 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4811 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4812}
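/*
 * Worked example of the MAC packing above: for the (hypothetical)
 * address 00:11:22:33:44:55 on a little-endian host, *(u16 *)&mac[0]
 * reads 0x1100 and swab16() turns it into 0x0011, so the three CAM
 * fields end up 0x0011, 0x2233 and 0x4455 - the halfword byte order
 * the firmware CAM expects.
 */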
4813
e665bfda
MC
4814/**
4815 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4816 *
4817 * @param bp driver descriptor
4818 * @param set set or clear an entry (1 or 0)
4819 * @param mac pointer to a buffer containing a MAC
4820 * @param cl_bit_vec bit vector of clients to register a MAC for
4821 * @param cam_offset offset in a CAM to use
4822 */
4823static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4824 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
4825{
4826 struct mac_configuration_cmd_e1h *config =
4827 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4828
8d9c5f34 4829 config->hdr.length = 1;
e665bfda
MC
4830 config->hdr.offset = cam_offset;
4831 config->hdr.client_id = 0xff;
34f80b04
EG
4832 config->hdr.reserved1 = 0;
4833
4834 /* primary MAC */
4835 config->config_table[0].msb_mac_addr =
e665bfda 4836 swab16(*(u16 *)&mac[0]);
34f80b04 4837 config->config_table[0].middle_mac_addr =
e665bfda 4838 swab16(*(u16 *)&mac[2]);
34f80b04 4839 config->config_table[0].lsb_mac_addr =
e665bfda 4840 swab16(*(u16 *)&mac[4]);
ca00392c 4841 config->config_table[0].clients_bit_vector =
e665bfda 4842 cpu_to_le32(cl_bit_vec);
34f80b04
EG
4843 config->config_table[0].vlan_id = 0;
4844 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
4845 if (set)
4846 config->config_table[0].flags = BP_PORT(bp);
4847 else
4848 config->config_table[0].flags =
4849 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 4850
e665bfda 4851 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 4852 (set ? "setting" : "clearing"),
34f80b04
EG
4853 config->config_table[0].msb_mac_addr,
4854 config->config_table[0].middle_mac_addr,
e665bfda 4855 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
4856
4857 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4858 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4859 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4860}
4861
a2fbb9ea
ET
4862static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4863 int *state_p, int poll)
4864{
4865 /* can take a while if any port is running */
8b3a0f0b 4866 int cnt = 5000;
a2fbb9ea 4867
c14423fe
ET
4868 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4869 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
4870
4871 might_sleep();
34f80b04 4872 while (cnt--) {
a2fbb9ea
ET
4873 if (poll) {
4874 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
4875 /* if index is different from 0
4876 * the reply for some commands will
3101c2bc 4877 * be on the non-default queue
a2fbb9ea
ET
4878 */
4879 if (idx)
4880 bnx2x_rx_int(&bp->fp[idx], 10);
4881 }
a2fbb9ea 4882
3101c2bc 4883 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
4884 if (*state_p == state) {
4885#ifdef BNX2X_STOP_ON_ERROR
4886 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
4887#endif
a2fbb9ea 4888 return 0;
8b3a0f0b 4889 }
a2fbb9ea 4890
a2fbb9ea 4891 msleep(1);
e3553b29
EG
4892
4893 if (bp->panic)
4894 return -EIO;
a2fbb9ea
ET
4895 }
4896
a2fbb9ea 4897 /* timeout! */
49d66772
ET
4898 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4899 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
4900#ifdef BNX2X_STOP_ON_ERROR
4901 bnx2x_panic();
4902#endif
a2fbb9ea 4903
49d66772 4904 return -EBUSY;
a2fbb9ea
ET
4905}
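/*
 * Sketch of the completion-wait pattern in bnx2x_wait_ramrod() above:
 * the state word is updated from the slowpath event handler, so each
 * poll must re-read it from memory (the driver uses mb(); volatile is
 * the crude userspace stand-in here).  Names are illustrative only.
 */
#include <errno.h>

extern void sleep_ms(unsigned int ms);		/* stand-in for msleep() */

static int wait_for_state(volatile int *state_p, int want, int max_ms)
{
	while (max_ms--) {
		if (*state_p == want)		/* written from IRQ path */
			return 0;
		sleep_ms(1);
	}
	return -EBUSY;
}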
4906
9f6c9258 4907void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
e665bfda
MC
4908{
4909 bp->set_mac_pending++;
4910 smp_wmb();
4911
4912 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4913 (1 << bp->fp->cl_id), BP_FUNC(bp));
4914
4915 /* Wait for a completion */
4916 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4917}
4918
9f6c9258 4919void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
e665bfda
MC
4920{
4921 bp->set_mac_pending++;
4922 smp_wmb();
4923
4924 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4925 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4926 1);
4927
4928 /* Wait for a completion */
4929 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4930}
4931
993ac7b5
MC
4932#ifdef BCM_CNIC
4933/**
4934 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4935 * MAC(s). This function will wait until the ramrod completion
4936 * returns.
4937 *
4938 * @param bp driver handle
4939 * @param set set or clear the CAM entry
4940 *
4941 * @return 0 if success, -ENODEV if ramrod doesn't return.
4942 */
9f6c9258 4943int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5
MC
4944{
4945 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4946
4947 bp->set_mac_pending++;
4948 smp_wmb();
4949
4950 /* Send a SET_MAC ramrod */
4951 if (CHIP_IS_E1(bp))
4952 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4953 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4954 1);
4955 else
4956 /* CAM allocation for E1H
4957 * unicasts: by func number
4958 * multicast: 20+FUNC*20, 20 each
4959 */
4960 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4961 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4962
4963 /* Wait for a completion when setting */
4964 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4965
4966 return 0;
4967}
4968#endif
4969
9f6c9258 4970int bnx2x_setup_leading(struct bnx2x *bp)
a2fbb9ea 4971{
34f80b04 4972 int rc;
a2fbb9ea 4973
c14423fe 4974 /* reset IGU state */
34f80b04 4975 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4976
4977 /* SETUP ramrod */
4978 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4979
34f80b04
EG
4980 /* Wait for completion */
4981 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 4982
34f80b04 4983 return rc;
a2fbb9ea
ET
4984}
4985
9f6c9258 4986int bnx2x_setup_multi(struct bnx2x *bp, int index)
a2fbb9ea 4987{
555f6c78
EG
4988 struct bnx2x_fastpath *fp = &bp->fp[index];
4989
a2fbb9ea 4990 /* reset IGU state */
555f6c78 4991 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 4992
228241eb 4993 /* SETUP ramrod */
555f6c78
EG
4994 fp->state = BNX2X_FP_STATE_OPENING;
4995 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4996 fp->cl_id, 0);
a2fbb9ea
ET
4997
4998 /* Wait for completion */
4999 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 5000 &(fp->state), 0);
a2fbb9ea
ET
5001}
5002
a2fbb9ea 5003
9f6c9258 5004void bnx2x_set_num_queues_msix(struct bnx2x *bp)
ca00392c 5005{
ca00392c
EG
5006
5007 switch (bp->multi_mode) {
5008 case ETH_RSS_MODE_DISABLED:
54b9ddaa 5009 bp->num_queues = 1;
ca00392c
EG
5010 break;
5011
5012 case ETH_RSS_MODE_REGULAR:
54b9ddaa
VZ
5013 if (num_queues)
5014 bp->num_queues = min_t(u32, num_queues,
5015 BNX2X_MAX_QUEUES(bp));
ca00392c 5016 else
54b9ddaa
VZ
5017 bp->num_queues = min_t(u32, num_online_cpus(),
5018 BNX2X_MAX_QUEUES(bp));
ca00392c
EG
5019 break;
5020
5021
5022 default:
54b9ddaa 5023 bp->num_queues = 1;
9f6c9258
DK
5024 break;
5025 }
a2fbb9ea
ET
5026}
5027
9f6c9258
DK
5028
5029
a2fbb9ea
ET
5030static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5031{
555f6c78 5032 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
5033 int rc;
5034
c14423fe 5035 /* halt the connection */
555f6c78
EG
5036 fp->state = BNX2X_FP_STATE_HALTING;
5037 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 5038
34f80b04 5039 /* Wait for completion */
a2fbb9ea 5040 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 5041 &(fp->state), 1);
c14423fe 5042 if (rc) /* timeout */
a2fbb9ea
ET
5043 return rc;
5044
5045 /* delete cfc entry */
5046 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5047
34f80b04
EG
5048 /* Wait for completion */
5049 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 5050 &(fp->state), 1);
34f80b04 5051 return rc;
a2fbb9ea
ET
5052}
5053
da5a662a 5054static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 5055{
4781bfad 5056 __le16 dsb_sp_prod_idx;
c14423fe 5057 /* if the other port is handling traffic,
a2fbb9ea 5058 this can take a lot of time */
34f80b04
EG
5059 int cnt = 500;
5060 int rc;
a2fbb9ea
ET
5061
5062 might_sleep();
5063
5064 /* Send HALT ramrod */
5065 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 5066 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 5067
34f80b04
EG
5068 /* Wait for completion */
5069 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5070 &(bp->fp[0].state), 1);
5071 if (rc) /* timeout */
da5a662a 5072 return rc;
a2fbb9ea 5073
49d66772 5074 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 5075
228241eb 5076 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
5077 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5078
49d66772 5079 /* Wait for completion to arrive on the default status block;
a2fbb9ea
ET
5080 we are going to reset the chip anyway,
5081 so there is not much to do if this times out.
5082 */
34f80b04 5083 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
5084 if (!cnt) {
5085 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5086 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5087 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5088#ifdef BNX2X_STOP_ON_ERROR
5089 bnx2x_panic();
5090#endif
36e552ab 5091 rc = -EBUSY;
34f80b04
EG
5092 break;
5093 }
5094 cnt--;
da5a662a 5095 msleep(1);
5650d9d4 5096 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
5097 }
5098 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5099 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
5100
5101 return rc;
a2fbb9ea
ET
5102}
5103
34f80b04
EG
5104static void bnx2x_reset_func(struct bnx2x *bp)
5105{
5106 int port = BP_PORT(bp);
5107 int func = BP_FUNC(bp);
5108 int base, i;
5109
5110 /* Configure IGU */
5111 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5112 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5113
37b091ba
MC
5114#ifdef BCM_CNIC
5115 /* Disable Timer scan */
5116 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5117 /*
5118 * Wait for at least 10ms and up to 2 seconds for the timers scan to
5119 * complete
5120 */
5121 for (i = 0; i < 200; i++) {
5122 msleep(10);
5123 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5124 break;
5125 }
5126#endif
34f80b04
EG
5127 /* Clear ILT */
5128 base = FUNC_ILT_BASE(func);
5129 for (i = base; i < base + ILT_PER_FUNC; i++)
5130 bnx2x_ilt_wr(bp, i, 0);
5131}
5132
5133static void bnx2x_reset_port(struct bnx2x *bp)
5134{
5135 int port = BP_PORT(bp);
5136 u32 val;
5137
5138 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5139
5140 /* Do not rcv packets to BRB */
5141 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5142 /* Do not direct rcv packets that are not for MCP to the BRB */
5143 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5144 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5145
5146 /* Configure AEU */
5147 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5148
5149 msleep(100);
5150 /* Check for BRB port occupancy */
5151 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5152 if (val)
5153 DP(NETIF_MSG_IFDOWN,
33471629 5154 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
5155
5156 /* TODO: Close Doorbell port? */
5157}
5158
34f80b04
EG
5159static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5160{
5161 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5162 BP_FUNC(bp), reset_code);
5163
5164 switch (reset_code) {
5165 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5166 bnx2x_reset_port(bp);
5167 bnx2x_reset_func(bp);
5168 bnx2x_reset_common(bp);
5169 break;
5170
5171 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5172 bnx2x_reset_port(bp);
5173 bnx2x_reset_func(bp);
5174 break;
5175
5176 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5177 bnx2x_reset_func(bp);
5178 break;
49d66772 5179
34f80b04
EG
5180 default:
5181 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5182 break;
5183 }
5184}
5185
9f6c9258 5186void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 5187{
da5a662a 5188 int port = BP_PORT(bp);
a2fbb9ea 5189 u32 reset_code = 0;
da5a662a 5190 int i, cnt, rc;
a2fbb9ea 5191
555f6c78 5192 /* Wait until tx fastpath tasks complete */
54b9ddaa 5193 for_each_queue(bp, i) {
228241eb
ET
5194 struct bnx2x_fastpath *fp = &bp->fp[i];
5195
34f80b04 5196 cnt = 1000;
e8b5fc51 5197 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 5198
7961f791 5199 bnx2x_tx_int(fp);
34f80b04
EG
5200 if (!cnt) {
5201 BNX2X_ERR("timeout waiting for queue[%d]\n",
5202 i);
5203#ifdef BNX2X_STOP_ON_ERROR
5204 bnx2x_panic();
5205 return -EBUSY;
5206#else
5207 break;
5208#endif
5209 }
5210 cnt--;
da5a662a 5211 msleep(1);
34f80b04 5212 }
228241eb 5213 }
da5a662a
VZ
5214 /* Give HW time to discard old tx messages */
5215 msleep(1);
a2fbb9ea 5216
3101c2bc
YG
5217 if (CHIP_IS_E1(bp)) {
5218 struct mac_configuration_cmd *config =
5219 bnx2x_sp(bp, mcast_config);
5220
e665bfda 5221 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 5222
8d9c5f34 5223 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
5224 CAM_INVALIDATE(config->config_table[i]);
5225
8d9c5f34 5226 config->hdr.length = i;
3101c2bc
YG
5227 if (CHIP_REV_IS_SLOW(bp))
5228 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5229 else
5230 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 5231 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
5232 config->hdr.reserved1 = 0;
5233
e665bfda
MC
5234 bp->set_mac_pending++;
5235 smp_wmb();
5236
3101c2bc
YG
5237 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5238 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5239 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5240
5241 } else { /* E1H */
65abd74d
YG
5242 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5243
e665bfda 5244 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
5245
5246 for (i = 0; i < MC_HASH_SIZE; i++)
5247 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
5248
5249 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 5250 }
993ac7b5
MC
5251#ifdef BCM_CNIC
5252 /* Clear iSCSI L2 MAC */
5253 mutex_lock(&bp->cnic_mutex);
5254 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5255 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5256 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5257 }
5258 mutex_unlock(&bp->cnic_mutex);
5259#endif
3101c2bc 5260
65abd74d
YG
5261 if (unload_mode == UNLOAD_NORMAL)
5262 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5263
7d0446c2 5264 else if (bp->flags & NO_WOL_FLAG)
65abd74d 5265 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 5266
7d0446c2 5267 else if (bp->wol) {
65abd74d
YG
5268 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5269 u8 *mac_addr = bp->dev->dev_addr;
5270 u32 val;
5271 /* The mac address is written to entries 1-4 to
5272 preserve entry 0 which is used by the PMF */
5273 u8 entry = (BP_E1HVN(bp) + 1)*8;
5274
5275 val = (mac_addr[0] << 8) | mac_addr[1];
5276 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5277
5278 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5279 (mac_addr[4] << 8) | mac_addr[5];
5280 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5281
5282 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5283
5284 } else
5285 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5286
34f80b04
EG
5287 /* Close multi and leading connections
5288 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
5289 for_each_nondefault_queue(bp, i)
5290 if (bnx2x_stop_multi(bp, i))
228241eb 5291 goto unload_error;
a2fbb9ea 5292
da5a662a
VZ
5293 rc = bnx2x_stop_leading(bp);
5294 if (rc) {
34f80b04 5295 BNX2X_ERR("Stop leading failed!\n");
da5a662a 5296#ifdef BNX2X_STOP_ON_ERROR
34f80b04 5297 return -EBUSY;
da5a662a
VZ
5298#else
5299 goto unload_error;
34f80b04 5300#endif
228241eb
ET
5301 }
5302
5303unload_error:
34f80b04 5304 if (!BP_NOMCP(bp))
228241eb 5305 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 5306 else {
f5372251 5307 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
5308 load_count[0], load_count[1], load_count[2]);
5309 load_count[0]--;
da5a662a 5310 load_count[1 + port]--;
f5372251 5311 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
5312 load_count[0], load_count[1], load_count[2]);
5313 if (load_count[0] == 0)
5314 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 5315 else if (load_count[1 + port] == 0)
34f80b04
EG
5316 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5317 else
5318 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5319 }
a2fbb9ea 5320
34f80b04
EG
5321 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5322 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5323 bnx2x__link_reset(bp);
a2fbb9ea
ET
5324
5325 /* Reset the chip */
228241eb 5326 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
5327
5328 /* Report UNLOAD_DONE to MCP */
34f80b04 5329 if (!BP_NOMCP(bp))
a2fbb9ea 5330 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 5331
72fd0718
VZ
5332}
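/*
 * Sketch of the no-MCP unload accounting above, with hypothetical counts
 * (load_count[0] covers the whole chip, load_count[1 + port] one port);
 * shown for port 0, after this function's decrements:
 *
 *	{1, 1, 0} -> {0, 0, 0}: last user on the chip, UNLOAD_COMMON
 *	{2, 1, 1} -> {1, 0, 1}: port 0 now idle,       UNLOAD_PORT
 *	{2, 2, 0} -> {1, 1, 0}: others still loaded,   UNLOAD_FUNCTION
 */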
5333
9f6c9258 5334void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
5335{
5336 u32 val;
5337
5338 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5339
5340 if (CHIP_IS_E1(bp)) {
5341 int port = BP_PORT(bp);
5342 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5343 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5344
5345 val = REG_RD(bp, addr);
5346 val &= ~(0x300);
5347 REG_WR(bp, addr, val);
5348 } else if (CHIP_IS_E1H(bp)) {
5349 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5350 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5351 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5352 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5353 }
5354}
5355
72fd0718
VZ
5356
5357/* Close gates #2, #3 and #4: */
5358static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5359{
5360 u32 val, addr;
5361
5362 /* Gates #2 and #4a are closed/opened for "not E1" only */
5363 if (!CHIP_IS_E1(bp)) {
5364 /* #4 */
5365 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5366 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5367 close ? (val | 0x1) : (val & (~(u32)1)));
5368 /* #2 */
5369 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5370 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5371 close ? (val | 0x1) : (val & (~(u32)1)));
5372 }
5373
5374 /* #3 */
5375 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5376 val = REG_RD(bp, addr);
5377 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5378
5379 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5380 close ? "closing" : "opening");
5381 mmiowb();
5382}
5383
5384#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
5385
5386static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5387{
5388 /* Do some magic... */
5389 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5390 *magic_val = val & SHARED_MF_CLP_MAGIC;
5391 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5392}
5393
5394/* Restore the value of the `magic' bit.
5395 *
5396 * @param bp Driver handle.
5397 * @param magic_val Old value of the `magic' bit.
5398 */
5399static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5400{
5401 /* Restore the `magic' bit value... */
5402 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5403 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5404 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5405 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5406 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5407 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5408}
5409
5410/* Prepares for MCP reset: takes care of CLP configurations.
5411 *
5412 * @param bp
5413 * @param magic_val Old value of 'magic' bit.
5414 */
5415static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5416{
5417 u32 shmem;
5418 u32 validity_offset;
5419
5420 DP(NETIF_MSG_HW, "Starting\n");
5421
5422 /* Set `magic' bit in order to save MF config */
5423 if (!CHIP_IS_E1(bp))
5424 bnx2x_clp_reset_prep(bp, magic_val);
5425
5426 /* Get shmem offset */
5427 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5428 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5429
5430 /* Clear validity map flags */
5431 if (shmem > 0)
5432 REG_WR(bp, shmem + validity_offset, 0);
5433}
5434
5435#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
5436#define MCP_ONE_TIMEOUT 100 /* 100 ms */
5437
5438/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5439 * depending on the HW type.
5440 *
5441 * @param bp
5442 */
5443static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5444{
5445 /* special handling for emulation and FPGA,
5446 wait 10 times longer */
5447 if (CHIP_REV_IS_SLOW(bp))
5448 msleep(MCP_ONE_TIMEOUT*10);
5449 else
5450 msleep(MCP_ONE_TIMEOUT);
5451}
5452
5453static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5454{
5455 u32 shmem, cnt, validity_offset, val;
5456 int rc = 0;
5457
5458 msleep(100);
5459
5460 /* Get shmem offset */
5461 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5462 if (shmem == 0) {
5463 BNX2X_ERR("Shmem 0 return failure\n");
5464 rc = -ENOTTY;
5465 goto exit_lbl;
5466 }
5467
5468 validity_offset = offsetof(struct shmem_region, validity_map[0]);
5469
5470 /* Wait for MCP to come up */
5471 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5472 /* TBD: it's best to check the validity map of the last port;
5473 * currently it checks port 0.
5474 */
5475 val = REG_RD(bp, shmem + validity_offset);
5476 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5477 shmem + validity_offset, val);
5478
5479 /* check that shared memory is valid. */
5480 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5481 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5482 break;
5483
5484 bnx2x_mcp_wait_one(bp);
5485 }
5486
5487 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5488
5489 /* Check that shared memory is valid. This indicates that MCP is up. */
5490 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5491 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5492 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5493 rc = -ENOTTY;
5494 goto exit_lbl;
5495 }
5496
5497exit_lbl:
5498 /* Restore the `magic' bit value */
5499 if (!CHIP_IS_E1(bp))
5500 bnx2x_clp_reset_done(bp, magic_val);
5501
5502 return rc;
5503}
5504
5505static void bnx2x_pxp_prep(struct bnx2x *bp)
5506{
5507 if (!CHIP_IS_E1(bp)) {
5508 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5509 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5510 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5511 mmiowb();
5512 }
5513}
5514
5515/*
5516 * Reset the whole chip except for:
5517 * - PCIE core
5518 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5519 * one reset bit)
5520 * - IGU
5521 * - MISC (including AEU)
5522 * - GRC
5523 * - RBCN, RBCP
5524 */
5525static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5526{
5527 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5528
5529 not_reset_mask1 =
5530 MISC_REGISTERS_RESET_REG_1_RST_HC |
5531 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5532 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5533
5534 not_reset_mask2 =
5535 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5536 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5537 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5538 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5539 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5540 MISC_REGISTERS_RESET_REG_2_RST_GRC |
5541 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5542 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5543
5544 reset_mask1 = 0xffffffff;
5545
5546 if (CHIP_IS_E1(bp))
5547 reset_mask2 = 0xffff;
5548 else
5549 reset_mask2 = 0x1ffff;
5550
5551 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5552 reset_mask1 & (~not_reset_mask1));
5553 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5554 reset_mask2 & (~not_reset_mask2));
5555
5556 barrier();
5557 mmiowb();
5558
5559 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5560 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5561 mmiowb();
5562}
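/*
 * Note on the mask arithmetic above: per the convention visible in
 * bnx2x_undi_unload() later in this file ("reset device" writes the
 * _CLEAR register, "take the NIG out of reset" writes _SET), writing a
 * bit to _CLEAR asserts reset for that block and writing it to _SET
 * releases it. So the first pair of writes asserts reset on everything
 * except the keep-alive blocks in not_reset_mask{1,2}, and the second
 * pair releases all blocks again.
 */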
5563
5564static int bnx2x_process_kill(struct bnx2x *bp)
5565{
5566 int cnt = 1000;
5567 u32 val = 0;
5568 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5569
5570
5571 /* Empty the Tetris buffer, wait for 1s */
5572 do {
5573 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5574 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5575 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5576 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5577 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5578 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5579 ((port_is_idle_0 & 0x1) == 0x1) &&
5580 ((port_is_idle_1 & 0x1) == 0x1) &&
5581 (pgl_exp_rom2 == 0xffffffff))
5582 break;
5583 msleep(1);
5584 } while (cnt-- > 0);
5585
5586 if (cnt <= 0) {
5587 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5588 " are still"
5589 " outstanding read requests after 1s!\n");
5590 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5591 " port_is_idle_0=0x%08x,"
5592 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5593 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5594 pgl_exp_rom2);
5595 return -EAGAIN;
5596 }
5597
5598 barrier();
5599
5600 /* Close gates #2, #3 and #4 */
5601 bnx2x_set_234_gates(bp, true);
5602
5603 /* TBD: Indicate that "process kill" is in progress to MCP */
5604
5605 /* Clear "unprepared" bit */
5606 REG_WR(bp, MISC_REG_UNPREPARED, 0);
5607 barrier();
5608
5609 /* Make sure all is written to the chip before the reset */
5610 mmiowb();
5611
5612 /* Wait for 1ms to empty GLUE and PCI-E core queues,
5613 * PSWHST, GRC and PSWRD Tetris buffer.
5614 */
5615 msleep(1);
5616
5617 /* Prepare for chip reset: */
5618 /* MCP */
5619 bnx2x_reset_mcp_prep(bp, &val);
5620
5621 /* PXP */
5622 bnx2x_pxp_prep(bp);
5623 barrier();
5624
5625 /* reset the chip */
5626 bnx2x_process_kill_chip_reset(bp);
5627 barrier();
5628
5629 /* Recover after reset: */
5630 /* MCP */
5631 if (bnx2x_reset_mcp_comp(bp, val))
5632 return -EAGAIN;
5633
5634 /* PXP */
5635 bnx2x_pxp_prep(bp);
5636
5637 /* Open the gates #2, #3 and #4 */
5638 bnx2x_set_234_gates(bp, false);
5639
5640 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5641 * reset state, re-enable attentions. */
5642
a2fbb9ea
ET
5643 return 0;
5644}
5645
72fd0718
VZ
5646static int bnx2x_leader_reset(struct bnx2x *bp)
5647{
5648 int rc = 0;
5649 /* Try to recover after the failure */
5650 if (bnx2x_process_kill(bp)) {
5651 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5652 bp->dev->name);
5653 rc = -EAGAIN;
5654 goto exit_leader_reset;
5655 }
5656
5657 /* Clear "reset is in progress" bit and update the driver state */
5658 bnx2x_set_reset_done(bp);
5659 bp->recovery_state = BNX2X_RECOVERY_DONE;
5660
5661exit_leader_reset:
5662 bp->is_leader = 0;
5663 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5664 smp_wmb();
5665 return rc;
5666}
5667
72fd0718
VZ
5668/* Assumption: runs under rtnl lock. This together with the fact
5669 * that it's called only from bnx2x_reset_task() ensures that it
5670 * will never be called when netif_running(bp->dev) is false.
5671 */
5672static void bnx2x_parity_recover(struct bnx2x *bp)
5673{
5674 DP(NETIF_MSG_HW, "Handling parity\n");
5675 while (1) {
5676 switch (bp->recovery_state) {
5677 case BNX2X_RECOVERY_INIT:
5678 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5679 /* Try to get a LEADER_LOCK HW lock */
5680 if (bnx2x_trylock_hw_lock(bp,
5681 HW_LOCK_RESOURCE_RESERVED_08))
5682 bp->is_leader = 1;
5683
5684 /* Stop the driver */
5685 /* If interface has been removed - break */
5686 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5687 return;
5688
5689 bp->recovery_state = BNX2X_RECOVERY_WAIT;
5690 /* Ensure "is_leader" and "recovery_state"
5691 * update values are seen on other CPUs
5692 */
5693 smp_wmb();
5694 break;
5695
5696 case BNX2X_RECOVERY_WAIT:
5697 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5698 if (bp->is_leader) {
5699 u32 load_counter = bnx2x_get_load_cnt(bp);
5700 if (load_counter) {
5701 /* Wait until all other functions are
5702 * down.
5703 */
5704 schedule_delayed_work(&bp->reset_task,
5705 HZ/10);
5706 return;
5707 } else {
5708 /* If all other functions have gone down -
5709 * try to bring the chip back to
5710 * normal. In any case it's an exit
5711 * point for a leader.
5712 */
5713 if (bnx2x_leader_reset(bp) ||
5714 bnx2x_nic_load(bp, LOAD_NORMAL)) {
5715 printk(KERN_ERR"%s: Recovery "
5716 "has failed. Power cycle is "
5717 "needed.\n", bp->dev->name);
5718 /* Disconnect this device */
5719 netif_device_detach(bp->dev);
5720 /* Block ifup for all function
5721 * of this ASIC until
5722 * "process kill" or power
5723 * cycle.
5724 */
5725 bnx2x_set_reset_in_progress(bp);
5726 /* Shut down the power */
5727 bnx2x_set_power_state(bp,
5728 PCI_D3hot);
5729 return;
5730 }
5731
5732 return;
5733 }
5734 } else { /* non-leader */
5735 if (!bnx2x_reset_is_done(bp)) {
5736 /* Try to get the LEADER_LOCK HW lock,
5737 * since a former leader may have
5738 * been unloaded by the user or
5739 * released leadership for some
5740 * other reason.
5741 */
5742 if (bnx2x_trylock_hw_lock(bp,
5743 HW_LOCK_RESOURCE_RESERVED_08)) {
5744 /* I'm a leader now! Restart a
5745 * switch case.
5746 */
5747 bp->is_leader = 1;
5748 break;
5749 }
5750
5751 schedule_delayed_work(&bp->reset_task,
5752 HZ/10);
5753 return;
5754
5755 } else { /* A leader has completed
5756 * the "process kill". It's an exit
5757 * point for a non-leader.
5758 */
5759 bnx2x_nic_load(bp, LOAD_NORMAL);
5760 bp->recovery_state =
5761 BNX2X_RECOVERY_DONE;
5762 smp_wmb();
5763 return;
5764 }
5765 }
5766 default:
5767 return;
5768 }
5769 }
5770}
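/*
 * Compact summary of the recovery state machine above (descriptive only):
 *
 *	INIT:  try to take the leader lock, unload the NIC, go to WAIT.
 *	WAIT (leader):     while other functions are still loaded, re-arm
 *	       the reset task; once the load counter reaches zero, run
 *	       bnx2x_leader_reset() and reload.
 *	WAIT (non-leader): while reset is in progress, retry the leader
 *	       lock or re-arm the task; once the leader finishes, reload
 *	       and move to DONE.
 */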
5771
5772/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5773 * scheduled on a general queue in order to prevent a deadlock.
5774 */
34f80b04
EG
5775static void bnx2x_reset_task(struct work_struct *work)
5776{
72fd0718 5777 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
5778
5779#ifdef BNX2X_STOP_ON_ERROR
5780 BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
5781 " so the reset is skipped to allow a debug dump,\n"
72fd0718 5782 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
5783 return;
5784#endif
5785
5786 rtnl_lock();
5787
5788 if (!netif_running(bp->dev))
5789 goto reset_task_exit;
5790
72fd0718
VZ
5791 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5792 bnx2x_parity_recover(bp);
5793 else {
5794 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5795 bnx2x_nic_load(bp, LOAD_NORMAL);
5796 }
34f80b04
EG
5797
5798reset_task_exit:
5799 rtnl_unlock();
5800}
5801
a2fbb9ea
ET
5802/* end of nic load/unload */
5803
a2fbb9ea
ET
5804/*
5805 * Init service functions
5806 */
5807
f1ef27ef
EG
5808static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5809{
5810 switch (func) {
5811 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5812 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5813 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5814 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5815 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5816 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5817 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5818 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5819 default:
5820 BNX2X_ERR("Unsupported function index: %d\n", func);
5821 return (u32)(-1);
5822 }
5823}
5824
5825static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5826{
5827 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5828
5829 /* Flush all outstanding writes */
5830 mmiowb();
5831
5832 /* Pretend to be function 0 */
5833 REG_WR(bp, reg, 0);
5834 /* Flush the GRC transaction (in the chip) */
5835 new_val = REG_RD(bp, reg);
5836 if (new_val != 0) {
5837 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5838 new_val);
5839 BUG();
5840 }
5841
5842 /* From now we are in the "like-E1" mode */
5843 bnx2x_int_disable(bp);
5844
5845 /* Flush all outstanding writes */
5846 mmiowb();
5847
5848 /* Restore the original function settings */
5849 REG_WR(bp, reg, orig_func);
5850 new_val = REG_RD(bp, reg);
5851 if (new_val != orig_func) {
5852 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5853 orig_func, new_val);
5854 BUG();
5855 }
5856}
5857
5858static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5859{
5860 if (CHIP_IS_E1H(bp))
5861 bnx2x_undi_int_disable_e1h(bp, func);
5862 else
5863 bnx2x_int_disable(bp);
5864}
5865
34f80b04
EG
5866static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5867{
5868 u32 val;
5869
5870 /* Check if there is any driver already loaded */
5871 val = REG_RD(bp, MISC_REG_UNPREPARED);
5872 if (val == 0x1) {
5873 /* Check if it is the UNDI driver
5874 * UNDI driver initializes CID offset for normal bell to 0x7
5875 */
4a37fb66 5876 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5877 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5878 if (val == 0x7) {
5879 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 5880 /* save our func */
34f80b04 5881 int func = BP_FUNC(bp);
da5a662a
VZ
5882 u32 swap_en;
5883 u32 swap_val;
34f80b04 5884
b4661739
EG
5885 /* clear the UNDI indication */
5886 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5887
34f80b04
EG
5888 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5889
5890 /* try unload UNDI on port 0 */
5891 bp->func = 0;
da5a662a
VZ
5892 bp->fw_seq =
5893 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5894 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 5895 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
5896
5897 /* if UNDI is loaded on the other port */
5898 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5899
da5a662a
VZ
5900 /* send "DONE" for previous unload */
5901 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5902
5903 /* unload UNDI on port 1 */
34f80b04 5904 bp->func = 1;
da5a662a
VZ
5905 bp->fw_seq =
5906 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5907 DRV_MSG_SEQ_NUMBER_MASK);
5908 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5909
5910 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
5911 }
5912
b4661739
EG
5913 /* now it's safe to release the lock */
5914 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5915
f1ef27ef 5916 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
5917
5918 /* close input traffic and wait for it */
5919 /* Do not rcv packets to BRB */
5920 REG_WR(bp,
5921 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5922 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5923 /* Do not direct rcv packets that are not for MCP to
5924 * the BRB */
5925 REG_WR(bp,
5926 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5927 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5928 /* clear AEU */
5929 REG_WR(bp,
5930 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5931 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5932 msleep(10);
5933
5934 /* save NIG port swap info */
5935 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5936 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
5937 /* reset device */
5938 REG_WR(bp,
5939 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 5940 0xd3ffffff);
34f80b04
EG
5941 REG_WR(bp,
5942 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5943 0x1403);
da5a662a
VZ
5944 /* take the NIG out of reset and restore swap values */
5945 REG_WR(bp,
5946 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5947 MISC_REGISTERS_RESET_REG_1_RST_NIG);
5948 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5949 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5950
5951 /* send unload done to the MCP */
5952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5953
5954 /* restore our func and fw_seq */
5955 bp->func = func;
5956 bp->fw_seq =
5957 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5958 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
5959
5960 } else
5961 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
5962 }
5963}
5964
5965static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5966{
5967 u32 val, val2, val3, val4, id;
72ce58c3 5968 u16 pmc;
34f80b04
EG
5969
5970 /* Get the chip revision id and number. */
5971 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5972 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5973 id = ((val & 0xffff) << 16);
5974 val = REG_RD(bp, MISC_REG_CHIP_REV);
5975 id |= ((val & 0xf) << 12);
5976 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5977 id |= ((val & 0xff) << 4);
5a40e08e 5978 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
5979 id |= (val & 0xf);
5980 bp->common.chip_id = id;
5981 bp->link_params.chip_id = bp->common.chip_id;
5982 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5983
1c06328c
EG
5984 val = (REG_RD(bp, 0x2874) & 0x55);
5985 if ((bp->common.chip_id & 0x1) ||
5986 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5987 bp->flags |= ONE_PORT_FLAG;
5988 BNX2X_DEV_INFO("single port device\n");
5989 }
5990
34f80b04
EG
5991 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5992 bp->common.flash_size = (NVRAM_1MB_SIZE <<
5993 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5994 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5995 bp->common.flash_size, bp->common.flash_size);
5996
5997 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 5998 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 5999 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
6000 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6001 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
6002
6003 if (!bp->common.shmem_base ||
6004 (bp->common.shmem_base < 0xA0000) ||
6005 (bp->common.shmem_base >= 0xC0000)) {
6006 BNX2X_DEV_INFO("MCP not active\n");
6007 bp->flags |= NO_MCP_FLAG;
6008 return;
6009 }
6010
6011 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6012 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6013 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
cdaa7cb8 6014 BNX2X_ERROR("BAD MCP validity signature\n");
34f80b04
EG
6015
6016 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 6017 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
6018
6019 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6020 SHARED_HW_CFG_LED_MODE_MASK) >>
6021 SHARED_HW_CFG_LED_MODE_SHIFT);
6022
c2c8b03e
EG
6023 bp->link_params.feature_config_flags = 0;
6024 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6025 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6026 bp->link_params.feature_config_flags |=
6027 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6028 else
6029 bp->link_params.feature_config_flags &=
6030 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6031
34f80b04
EG
6032 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6033 bp->common.bc_ver = val;
6034 BNX2X_DEV_INFO("bc_ver %X\n", val);
6035 if (val < BNX2X_BC_VER) {
6036 /* for now only warn;
6037 * later we might need to enforce this */
cdaa7cb8
VZ
6038 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6039 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 6040 }
4d295db0
EG
6041 bp->link_params.feature_config_flags |=
6042 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6043 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
6044
6045 if (BP_E1HVN(bp) == 0) {
6046 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6047 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6048 } else {
6049 /* no WOL capability for E1HVN != 0 */
6050 bp->flags |= NO_WOL_FLAG;
6051 }
6052 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 6053 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
6054
6055 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6056 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6057 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6058 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6059
cdaa7cb8
VZ
6060 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6061 val, val2, val3, val4);
34f80b04
EG
6062}
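/*
 * Illustrative sketch, not driver code: decoding the chip_id assembled
 * above from the "chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3"
 * layout. The helper names are hypothetical.
 */
static inline u16 bnx2x_chip_id_num(u32 id)	{ return id >> 16; }
static inline u8 bnx2x_chip_id_rev(u32 id)	{ return (id >> 12) & 0xf; }
static inline u8 bnx2x_chip_id_metal(u32 id)	{ return (id >> 4) & 0xff; }
static inline u8 bnx2x_chip_id_bond(u32 id)	{ return id & 0xf; }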
6063
6064static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6065 u32 switch_cfg)
a2fbb9ea 6066{
34f80b04 6067 int port = BP_PORT(bp);
a2fbb9ea
ET
6068 u32 ext_phy_type;
6069
a2fbb9ea
ET
6070 switch (switch_cfg) {
6071 case SWITCH_CFG_1G:
6072 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6073
c18487ee
YR
6074 ext_phy_type =
6075 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6076 switch (ext_phy_type) {
6077 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6078 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6079 ext_phy_type);
6080
34f80b04
EG
6081 bp->port.supported |= (SUPPORTED_10baseT_Half |
6082 SUPPORTED_10baseT_Full |
6083 SUPPORTED_100baseT_Half |
6084 SUPPORTED_100baseT_Full |
6085 SUPPORTED_1000baseT_Full |
6086 SUPPORTED_2500baseX_Full |
6087 SUPPORTED_TP |
6088 SUPPORTED_FIBRE |
6089 SUPPORTED_Autoneg |
6090 SUPPORTED_Pause |
6091 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6092 break;
6093
6094 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6095 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6096 ext_phy_type);
6097
34f80b04
EG
6098 bp->port.supported |= (SUPPORTED_10baseT_Half |
6099 SUPPORTED_10baseT_Full |
6100 SUPPORTED_100baseT_Half |
6101 SUPPORTED_100baseT_Full |
6102 SUPPORTED_1000baseT_Full |
6103 SUPPORTED_TP |
6104 SUPPORTED_FIBRE |
6105 SUPPORTED_Autoneg |
6106 SUPPORTED_Pause |
6107 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6108 break;
6109
6110 default:
6111 BNX2X_ERR("NVRAM config error. "
6112 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6113 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6114 return;
6115 }
6116
34f80b04
EG
6117 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6118 port*0x10);
6119 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6120 break;
6121
6122 case SWITCH_CFG_10G:
6123 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6124
c18487ee
YR
6125 ext_phy_type =
6126 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6127 switch (ext_phy_type) {
6128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6129 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6130 ext_phy_type);
6131
34f80b04
EG
6132 bp->port.supported |= (SUPPORTED_10baseT_Half |
6133 SUPPORTED_10baseT_Full |
6134 SUPPORTED_100baseT_Half |
6135 SUPPORTED_100baseT_Full |
6136 SUPPORTED_1000baseT_Full |
6137 SUPPORTED_2500baseX_Full |
6138 SUPPORTED_10000baseT_Full |
6139 SUPPORTED_TP |
6140 SUPPORTED_FIBRE |
6141 SUPPORTED_Autoneg |
6142 SUPPORTED_Pause |
6143 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6144 break;
6145
589abe3a
EG
6146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6147 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 6148 ext_phy_type);
f1410647 6149
34f80b04 6150 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 6151 SUPPORTED_1000baseT_Full |
34f80b04 6152 SUPPORTED_FIBRE |
589abe3a 6153 SUPPORTED_Autoneg |
34f80b04
EG
6154 SUPPORTED_Pause |
6155 SUPPORTED_Asym_Pause);
f1410647
ET
6156 break;
6157
589abe3a
EG
6158 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6159 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
6160 ext_phy_type);
6161
34f80b04 6162 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 6163 SUPPORTED_2500baseX_Full |
34f80b04 6164 SUPPORTED_1000baseT_Full |
589abe3a
EG
6165 SUPPORTED_FIBRE |
6166 SUPPORTED_Autoneg |
6167 SUPPORTED_Pause |
6168 SUPPORTED_Asym_Pause);
6169 break;
6170
6171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6172 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6173 ext_phy_type);
6174
6175 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
6176 SUPPORTED_FIBRE |
6177 SUPPORTED_Pause |
6178 SUPPORTED_Asym_Pause);
f1410647
ET
6179 break;
6180
589abe3a
EG
6181 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6182 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
6183 ext_phy_type);
6184
34f80b04
EG
6185 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6186 SUPPORTED_1000baseT_Full |
6187 SUPPORTED_FIBRE |
34f80b04
EG
6188 SUPPORTED_Pause |
6189 SUPPORTED_Asym_Pause);
f1410647
ET
6190 break;
6191
589abe3a
EG
6192 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6193 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
6194 ext_phy_type);
6195
34f80b04 6196 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 6197 SUPPORTED_1000baseT_Full |
34f80b04 6198 SUPPORTED_Autoneg |
589abe3a 6199 SUPPORTED_FIBRE |
34f80b04
EG
6200 SUPPORTED_Pause |
6201 SUPPORTED_Asym_Pause);
c18487ee
YR
6202 break;
6203
4d295db0
EG
6204 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6205 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6206 ext_phy_type);
6207
6208 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6209 SUPPORTED_1000baseT_Full |
6210 SUPPORTED_Autoneg |
6211 SUPPORTED_FIBRE |
6212 SUPPORTED_Pause |
6213 SUPPORTED_Asym_Pause);
6214 break;
6215
f1410647
ET
6216 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6217 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6218 ext_phy_type);
6219
34f80b04
EG
6220 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6221 SUPPORTED_TP |
6222 SUPPORTED_Autoneg |
6223 SUPPORTED_Pause |
6224 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6225 break;
6226
28577185 6227 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9a8a97e8
YR
6228 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6229 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM848xx)\n",
28577185
EG
6230 ext_phy_type);
6231
6232 bp->port.supported |= (SUPPORTED_10baseT_Half |
6233 SUPPORTED_10baseT_Full |
6234 SUPPORTED_100baseT_Half |
6235 SUPPORTED_100baseT_Full |
6236 SUPPORTED_1000baseT_Full |
6237 SUPPORTED_10000baseT_Full |
6238 SUPPORTED_TP |
6239 SUPPORTED_Autoneg |
6240 SUPPORTED_Pause |
6241 SUPPORTED_Asym_Pause);
6242 break;
6243
c18487ee
YR
6244 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6245 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6246 bp->link_params.ext_phy_config);
6247 break;
6248
a2fbb9ea
ET
6249 default:
6250 BNX2X_ERR("NVRAM config error. "
6251 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 6252 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6253 return;
6254 }
6255
34f80b04
EG
6256 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6257 port*0x18);
6258 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 6259
a2fbb9ea
ET
6260 break;
6261
6262 default:
6263 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 6264 bp->port.link_config);
a2fbb9ea
ET
6265 return;
6266 }
34f80b04 6267 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
6268
6269 /* mask what we support according to speed_cap_mask */
c18487ee
YR
6270 if (!(bp->link_params.speed_cap_mask &
6271 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 6272 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 6273
c18487ee
YR
6274 if (!(bp->link_params.speed_cap_mask &
6275 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 6276 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 6277
c18487ee
YR
6278 if (!(bp->link_params.speed_cap_mask &
6279 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 6280 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 6281
c18487ee
YR
6282 if (!(bp->link_params.speed_cap_mask &
6283 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 6284 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 6285
c18487ee
YR
6286 if (!(bp->link_params.speed_cap_mask &
6287 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
6288 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6289 SUPPORTED_1000baseT_Full);
a2fbb9ea 6290
c18487ee
YR
6291 if (!(bp->link_params.speed_cap_mask &
6292 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 6293 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 6294
c18487ee
YR
6295 if (!(bp->link_params.speed_cap_mask &
6296 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 6297 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 6298
34f80b04 6299 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
6300}
6301
34f80b04 6302static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6303{
c18487ee 6304 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 6305
34f80b04 6306 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6307 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 6308 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 6309 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6310 bp->port.advertising = bp->port.supported;
a2fbb9ea 6311 } else {
c18487ee
YR
6312 u32 ext_phy_type =
6313 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6314
6315 if ((ext_phy_type ==
6316 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6317 (ext_phy_type ==
6318 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 6319 /* force 10G, no AN */
c18487ee 6320 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 6321 bp->port.advertising =
a2fbb9ea
ET
6322 (ADVERTISED_10000baseT_Full |
6323 ADVERTISED_FIBRE);
6324 break;
6325 }
6326 BNX2X_ERR("NVRAM config error. "
6327 "Invalid link_config 0x%x"
6328 " Autoneg not supported\n",
34f80b04 6329 bp->port.link_config);
a2fbb9ea
ET
6330 return;
6331 }
6332 break;
6333
6334 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 6335 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 6336 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
6337 bp->port.advertising = (ADVERTISED_10baseT_Full |
6338 ADVERTISED_TP);
a2fbb9ea 6339 } else {
cdaa7cb8
VZ
6340 BNX2X_ERROR("NVRAM config error. "
6341 "Invalid link_config 0x%x"
6342 " speed_cap_mask 0x%x\n",
6343 bp->port.link_config,
6344 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6345 return;
6346 }
6347 break;
6348
6349 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 6350 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
6351 bp->link_params.req_line_speed = SPEED_10;
6352 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6353 bp->port.advertising = (ADVERTISED_10baseT_Half |
6354 ADVERTISED_TP);
a2fbb9ea 6355 } else {
cdaa7cb8
VZ
6356 BNX2X_ERROR("NVRAM config error. "
6357 "Invalid link_config 0x%x"
6358 " speed_cap_mask 0x%x\n",
6359 bp->port.link_config,
6360 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6361 return;
6362 }
6363 break;
6364
6365 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 6366 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 6367 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
6368 bp->port.advertising = (ADVERTISED_100baseT_Full |
6369 ADVERTISED_TP);
a2fbb9ea 6370 } else {
cdaa7cb8
VZ
6371 BNX2X_ERROR("NVRAM config error. "
6372 "Invalid link_config 0x%x"
6373 " speed_cap_mask 0x%x\n",
6374 bp->port.link_config,
6375 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6376 return;
6377 }
6378 break;
6379
6380 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 6381 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
6382 bp->link_params.req_line_speed = SPEED_100;
6383 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6384 bp->port.advertising = (ADVERTISED_100baseT_Half |
6385 ADVERTISED_TP);
a2fbb9ea 6386 } else {
cdaa7cb8
VZ
6387 BNX2X_ERROR("NVRAM config error. "
6388 "Invalid link_config 0x%x"
6389 " speed_cap_mask 0x%x\n",
6390 bp->port.link_config,
6391 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6392 return;
6393 }
6394 break;
6395
6396 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 6397 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 6398 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
6399 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6400 ADVERTISED_TP);
a2fbb9ea 6401 } else {
cdaa7cb8
VZ
6402 BNX2X_ERROR("NVRAM config error. "
6403 "Invalid link_config 0x%x"
6404 " speed_cap_mask 0x%x\n",
6405 bp->port.link_config,
6406 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6407 return;
6408 }
6409 break;
6410
6411 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 6412 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 6413 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
6414 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6415 ADVERTISED_TP);
a2fbb9ea 6416 } else {
cdaa7cb8
VZ
6417 BNX2X_ERROR("NVRAM config error. "
6418 "Invalid link_config 0x%x"
6419 " speed_cap_mask 0x%x\n",
6420 bp->port.link_config,
6421 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6422 return;
6423 }
6424 break;
6425
6426 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6427 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6428 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 6429 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 6430 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
6431 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6432 ADVERTISED_FIBRE);
a2fbb9ea 6433 } else {
cdaa7cb8
VZ
6434 BNX2X_ERROR("NVRAM config error. "
6435 "Invalid link_config 0x%x"
6436 " speed_cap_mask 0x%x\n",
6437 bp->port.link_config,
6438 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6439 return;
6440 }
6441 break;
6442
6443 default:
cdaa7cb8
VZ
6444 BNX2X_ERROR("NVRAM config error. "
6445 "BAD link speed link_config 0x%x\n",
6446 bp->port.link_config);
c18487ee 6447 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6448 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
6449 break;
6450 }
a2fbb9ea 6451
34f80b04
EG
6452 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6453 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 6454 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 6455 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 6456 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 6457
c18487ee 6458 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 6459 " advertising 0x%x\n",
c18487ee
YR
6460 bp->link_params.req_line_speed,
6461 bp->link_params.req_duplex,
34f80b04 6462 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
6463}
6464
e665bfda
MC
6465static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6466{
6467 mac_hi = cpu_to_be16(mac_hi);
6468 mac_lo = cpu_to_be32(mac_lo);
6469 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6470 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6471}
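/*
 * Worked example of the helper above, with hypothetical shmem words
 * mac_hi = 0x0010 and mac_lo = 0x18012345: after the endianness
 * conversions, mac_buf holds 00:10:18:01:23:45 -- mac_hi supplies the
 * first two bytes and mac_lo the remaining four, in network byte order.
 */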
6472
34f80b04 6473static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 6474{
34f80b04
EG
6475 int port = BP_PORT(bp);
6476 u32 val, val2;
589abe3a 6477 u32 config;
c2c8b03e 6478 u16 i;
01cd4528 6479 u32 ext_phy_type;
a2fbb9ea 6480
c18487ee 6481 bp->link_params.bp = bp;
34f80b04 6482 bp->link_params.port = port;
c18487ee 6483
c18487ee 6484 bp->link_params.lane_config =
a2fbb9ea 6485 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 6486 bp->link_params.ext_phy_config =
a2fbb9ea
ET
6487 SHMEM_RD(bp,
6488 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
6489 /* BCM8727_NOC => BCM8727 no over current */
6490 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6491 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6492 bp->link_params.ext_phy_config &=
6493 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6494 bp->link_params.ext_phy_config |=
6495 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6496 bp->link_params.feature_config_flags |=
6497 FEATURE_CONFIG_BCM8727_NOC;
6498 }
6499
c18487ee 6500 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
6501 SHMEM_RD(bp,
6502 dev_info.port_hw_config[port].speed_capability_mask);
6503
34f80b04 6504 bp->port.link_config =
a2fbb9ea
ET
6505 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6506
c2c8b03e
EG
6507 /* Get the 4 lanes xgxs config rx and tx */
6508 for (i = 0; i < 2; i++) {
6509 val = SHMEM_RD(bp,
6510 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6511 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6512 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6513
6514 val = SHMEM_RD(bp,
6515 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6516 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6517 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6518 }
6519
3ce2c3f9
EG
6520 /* If the device is capable of WoL, set the default state according
6521 * to the HW
6522 */
4d295db0 6523 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
6524 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6525 (config & PORT_FEATURE_WOL_ENABLED));
6526
c2c8b03e
EG
6527 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
6528 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
6529 bp->link_params.lane_config,
6530 bp->link_params.ext_phy_config,
34f80b04 6531 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 6532
4d295db0
EG
6533 bp->link_params.switch_cfg |= (bp->port.link_config &
6534 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 6535 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
6536
6537 bnx2x_link_settings_requested(bp);
6538
01cd4528
EG
6539 /*
6540 * If connected directly, work with the internal PHY, otherwise, work
6541 * with the external PHY
6542 */
6543 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6544 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6545 bp->mdio.prtad = bp->link_params.phy_addr;
6546
6547 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6548 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6549 bp->mdio.prtad =
659bc5c4 6550 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 6551
a2fbb9ea
ET
6552 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6553 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 6554 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
6555 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6556 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
6557
6558#ifdef BCM_CNIC
6559 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6560 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6561 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6562#endif
34f80b04
EG
6563}
6564
6565static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6566{
6567 int func = BP_FUNC(bp);
6568 u32 val, val2;
6569 int rc = 0;
a2fbb9ea 6570
34f80b04 6571 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 6572
34f80b04
EG
6573 bp->e1hov = 0;
6574 bp->e1hmf = 0;
2145a920 6575 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
34f80b04
EG
6576 bp->mf_config =
6577 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 6578
2691d51d 6579 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 6580 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 6581 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 6582 bp->e1hmf = 1;
2691d51d
EG
6583 BNX2X_DEV_INFO("%s function mode\n",
6584 IS_E1HMF(bp) ? "multi" : "single");
6585
6586 if (IS_E1HMF(bp)) {
6587 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6588 e1hov_tag) &
6589 FUNC_MF_CFG_E1HOV_TAG_MASK);
6590 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6591 bp->e1hov = val;
6592 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6593 "(0x%04x)\n",
6594 func, bp->e1hov, bp->e1hov);
6595 } else {
cdaa7cb8
VZ
6596 BNX2X_ERROR("No valid E1HOV for func %d,"
6597 " aborting\n", func);
34f80b04
EG
6598 rc = -EPERM;
6599 }
2691d51d
EG
6600 } else {
6601 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
6602 BNX2X_ERROR("VN %d in single function mode,"
6603 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
6604 rc = -EPERM;
6605 }
34f80b04
EG
6606 }
6607 }
a2fbb9ea 6608
34f80b04
EG
6609 if (!BP_NOMCP(bp)) {
6610 bnx2x_get_port_hwinfo(bp);
6611
6612 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6613 DRV_MSG_SEQ_NUMBER_MASK);
6614 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6615 }
6616
6617 if (IS_E1HMF(bp)) {
6618 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6619 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6620 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6621 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6622 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6623 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6624 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6625 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6626 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6627 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6628 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6629 ETH_ALEN);
6630 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6631 ETH_ALEN);
a2fbb9ea 6632 }
34f80b04
EG
6633
6634 return rc;
a2fbb9ea
ET
6635 }
6636
34f80b04
EG
6637 if (BP_NOMCP(bp)) {
6638 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 6639 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
6640 random_ether_addr(bp->dev->dev_addr);
6641 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6642 }
a2fbb9ea 6643
34f80b04
EG
6644 return rc;
6645}
6646
34f24c7f
VZ
6647static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6648{
6649 int cnt, i, block_end, rodi;
6650 char vpd_data[BNX2X_VPD_LEN+1];
6651 char str_id_reg[VENDOR_ID_LEN+1];
6652 char str_id_cap[VENDOR_ID_LEN+1];
6653 u8 len;
6654
6655 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6656 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6657
6658 if (cnt < BNX2X_VPD_LEN)
6659 goto out_not_found;
6660
6661 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6662 PCI_VPD_LRDT_RO_DATA);
6663 if (i < 0)
6664 goto out_not_found;
6665
6666
6667 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6668 pci_vpd_lrdt_size(&vpd_data[i]);
6669
6670 i += PCI_VPD_LRDT_TAG_SIZE;
6671
6672 if (block_end > BNX2X_VPD_LEN)
6673 goto out_not_found;
6674
6675 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6676 PCI_VPD_RO_KEYWORD_MFR_ID);
6677 if (rodi < 0)
6678 goto out_not_found;
6679
6680 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6681
6682 if (len != VENDOR_ID_LEN)
6683 goto out_not_found;
6684
6685 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6686
6687 /* vendor specific info */
6688 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6689 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6690 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6691 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6692
6693 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6694 PCI_VPD_RO_KEYWORD_VENDOR0);
6695 if (rodi >= 0) {
6696 len = pci_vpd_info_field_size(&vpd_data[rodi]);
6697
6698 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6699
6700 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6701 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6702 bp->fw_ver[len] = ' ';
6703 }
6704 }
6705 return;
6706 }
6707out_not_found:
6708 return;
6709}
6710
34f80b04
EG
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
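	/*
	 * For example, with BNX2X_BTR == 4 (a granularity of
	 * 4 * BNX2X_BTR == 16), the defaults above round 50 down to 48
	 * and 25 down to 16.
	 */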

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset the MCP mailbox sequence if there is an
			 * ongoing recovery
			 */
			bp->fw_seq = 0;

			/* If this is the first function to load and "reset
			 * done" is still not cleared, a previous driver may
			 * have left the recovery incomplete. We don't check
			 * the attention state here because it may have
			 * already been cleared by a "common" reset, but we
			 * shall proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been"
			       " properly completed yet. Try again later."
			       " If you still see this message after a few"
			       " retries then a power cycle is required.\n",
			       bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
						cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

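				/*
				 * 256-bit hash filter: the top 8 bits of
				 * the MAC's CRC32c pick one filter bit;
				 * regidx selects one of the MC_HASH_SIZE
				 * 32-bit registers and the low 5 bits
				 * select the bit within it.
				 */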
				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
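
/*
 * PCI-level init: enable the device, map BAR0 (registers) and BAR2
 * (doorbells), pick a DMA mask and wire up netdev ops and features.
 */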
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

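	/* Prefer 64-bit DMA (and flag DAC so NETIF_F_HIGHDMA can be
	 * advertised below); fall back to a 32-bit mask otherwise */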
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
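
/*
 * Sanity-check the firmware blob before use: every section must lie
 * within the file, every init_ops offset must point at a valid opcode
 * slot, and the embedded version must match the one the driver was
 * built against.
 */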
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
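/*
 * Worked example (illustrative values only): the big-endian pair
 * 0x05001234 0x0000abcd unpacks to op = 0x05, offset = 0x001234,
 * raw_data = 0x0000abcd.
 */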
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
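
/*
 * For instance, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) below allocates bp->init_data, fills it with the
 * byte-swapped init_data section of the firmware file, and jumps to
 * request_firmware_exit on allocation failure.
 */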

int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
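
/*
 * Probe sequence: allocate the multi-queue netdev, run PCI-level init
 * (bnx2x_init_dev) and SW-level init (bnx2x_init_bp), register with
 * the stack and report the PCIe link that was negotiated.
 */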
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC
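
/*
 * CNIC submits 16-byte kwqes into a driver-side ring (cnic_kwq);
 * bnx2x_cnic_sp_post below drains that ring onto the HW slow-path
 * queue (SPQ) as completions make room, bounded by max_kwqe_pending.
 */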
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	       bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
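
/*
 * Same as above, but callable from bottom-half context: the ops
 * pointer is sampled under RCU instead of taking the sleeping
 * cnic_mutex.
 */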
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
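	/* wait for RCU readers of cnic_ops (the _bh send path) to finish
	 * before freeing the kwq ring */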
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
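
/*
 * Entry point for the CNIC module: returns a filled-in cnic_eth_dev
 * describing this device's resources and the callbacks above.
 */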
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */