drivers/net/bnx2x/bnx2x_main.c (net-next-2.6, at commit "bnx2x: remove old FW files")
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
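/* FW_FILE_VERSION expands to the dotted firmware version string, so the
 * names above take the form "bnx2x/bnx2x-e1-<maj>.<min>.<rev>.<eng>.fw";
 * these are the blobs the driver asks userspace for at init time
 * (presumably via request_firmware()) and declares via MODULE_FIRMWARE()
 * below.
 */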

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
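
/* Example (hypothetical values): force MSI and disable TPA at load time:
 *   modprobe bnx2x int_mode=2 disable_tpa=1
 */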

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

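/* The helpers below write driver-owned structures into the per-STORM
 * internal memories (XSTORM/TSTORM/USTORM/CSTORM) through 32-bit BAR
 * register writes; 64-bit DMA addresses go out as two consecutive 32-bit
 * halves, low word first.
 */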
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset =
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset =
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

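/* Indirect register access: latch the GRC address through the
 * PCICFG_GRC_ADDRESS config-space window, move the data through
 * PCICFG_GRC_DATA, then park the window back at PCICFG_VENDOR_ID_OFFSET,
 * presumably so later config cycles land on a harmless offset.
 */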
/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

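/* bnx2x_write_dmae()/bnx2x_read_dmae() move data between host memory and
 * GRC space via the DMAE block: a dmae_command is posted to the engine,
 * then the driver polls the wb_comp slowpath word until the engine stores
 * DMAE_COMP_VAL there (bounded by ~200 polling iterations).
 */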
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

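/* bnx2x_write_dmae_phys_len() splits long writes into chunks of
 * DMAE_LEN32_WR_MAX(bp).  len counts 32-bit words while the addresses
 * advance in bytes, hence the "* 4" when stepping offset.
 */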
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

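/* Scan the four STORM assert lists and print any firmware asserts found;
 * the return value counts the asserts, so a non-zero result means the
 * firmware has tripped.
 */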
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

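/* Dump the MCP trace buffer located just below the shmem base: the 32-bit
 * "mark" word is read first, and the buffer is printed in two passes
 * (mark..shmem_base, then start..mark), apparently to restore
 * chronological order.
 */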
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size =
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = (u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
			"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
			sb_data_e1x.common.p_func.pf_id,
			sb_data_e1x.common.p_func.vf_id,
			sb_data_e1x.common.p_func.vf_valid,
			sb_data_e1x.common.p_func.vnic_id,
			sb_data_e1x.common.same_igu_sb_1b);

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

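/* Program HC_CONFIG for the active interrupt mode: MSI-X (single-ISR off),
 * MSI, or legacy INTx; attention bits stay enabled in every mode.  The
 * mmiowb()/barrier() pair below orders the config write against the
 * leading/trailing edge setup that follows on E1H.
 */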
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

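/* Slowpath completion handler: a ramrod CQE on this fastpath ring advances
 * fp->state (OPENING->OPEN, HALTING->HALTED, TERMINATING->TERMINATED) and
 * returns the SPQ credit that was consumed when the ramrod was posted.
 */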
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	bp->spq_left++;

	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

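/* HW resource locks live in MISC_REG_DRIVER_CONTROL_1..7, one 8-byte
 * stride per PCI function: writing the resource bit to reg+4 attempts the
 * take, and reading the bit back confirms ownership.  Acquire retries
 * every 5 ms for up to 5 s before giving up with -EAGAIN.
 */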
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

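/* GPIO accessors: when the port-swap straps are active (NIG_REG_PORT_SWAP
 * and NIG_REG_STRAP_OVERRIDE both set) the GPIO bank of the other port is
 * used, hence the XOR with the port number when computing gpio_shift.
 */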
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}
	/*
	 * The selected active PHY is always the one after swapping (when PHY
	 * swapping is enabled), so when swapping is enabled we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

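/* Congestion-management setup: r_param is the line rate in bytes/usec
 * (line_speed is in Mbps, so Mbps/8 = bytes/usec), and all timeouts are
 * converted to 4-usec SDM ticks.  At 10G, for example,
 * r_param = 10000/8 = 1250 bytes/usec.
 */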
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur;
	   the 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

2691d51d
EG
1684/* Calculates the sum of vn_min_rates.
1685 It's needed for further normalizing of the min_rates.
1686 Returns:
1687 sum of vn_min_rates.
1688 or
1689 0 - if all the min_rates are 0.
1690 In the later case fainess algorithm should be deactivated.
1691 If not all min_rates are zero then those that are zeroes will be set to 1.
1692 */
1693static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1694{
1695 int all_zero = 1;
1696 int port = BP_PORT(bp);
1697 int vn;
1698
1699 bp->vn_weight_sum = 0;
1700 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1701 int func = 2*vn + port;
523224a3 1702 u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
2691d51d
EG
1703 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1704 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1705
1706 /* Skip hidden vns */
1707 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1708 continue;
1709
1710 /* If min rate is zero - set it to 1 */
1711 if (!vn_min_rate)
1712 vn_min_rate = DEF_MIN_RATE;
1713 else
1714 all_zero = 0;
1715
1716 bp->vn_weight_sum += vn_min_rate;
1717 }
1718
1719 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1720 if (all_zero) {
1721 bp->cmng.flags.cmng_enables &=
1722 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1723 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1724 " fairness will be disabled\n");
1725 } else
1726 bp->cmng.flags.cmng_enables |=
1727 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1728}
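
/*
 * For example, with min rates configured as {0, 25, 0, 50} percent
 * across four vns (scaled by 100 above), the two zero entries are
 * bumped to DEF_MIN_RATE, all_zero stays 0, and vn_weight_sum becomes
 * 2500 + 5000 + 2 * DEF_MIN_RATE; fairness is switched off only when
 * every non-hidden vn reports a zero min rate.
 */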

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vns share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
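
/*
 * A sketch of the credit math above: taking T_FAIR_COEF = 10^7
 * usec*Mbps (implied by the "for 10G it is 1000usec" comment earlier)
 * and the maximal vn_weight_sum of 10000, T_FAIR_COEF / (8 * 10000)
 * is 125, so a vn with vn_min_rate = 2500 (25%) would get
 * vn_credit_delta = max(2500 * 125, 2 * fair_threshold) bytes of
 * credit per fairness period.
 */
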
static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_E1HMF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn;

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = 2*vn + BP_PORT(bp);
		bp->mf_config =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn */
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}
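
/*
 * The absolute function number packs the vn and the port as
 * func = (vn << 1) | port, so port 0 hosts functions {0, 2, 4, 6} and
 * port 1 hosts {1, 3, 5, 7}; the loop above raises
 * LINK_SYNC_ATTENTION_BIT_FUNC_0 + func for every vn but our own.
 */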

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
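
/*
 * For instance, on E1HVN 2 the value written to both edge registers
 * above is 0xff0f | (1 << 6) = 0xff4f - the fixed 0xff0f mask plus
 * (presumably) this vn's own NIG attention bit at position vn + 4.
 */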

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
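
/*
 * A minimal usage sketch (this command is one really issued elsewhere
 * in this file); a zero return means the MCP never echoed our
 * sequence number back:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *	if (!rc)
 *		BNX2X_ERR("MCP did not respond to DCC_OK\n");
 */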

/* must be called under rtnl_lock */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
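
/*
 * Every assignment above is the same conditional-mask idiom: with
 * mask = 1 << cl_id, one bit per client is set or cleared in each
 * aggregate filter word, i.e.
 *
 *	word = flag ? (word | mask) : (word & ~mask);
 *
 * so a call touches only this client's column of the filter state.
 */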

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (FUNC_CONFIG(p->func_flgs)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		/* tpa */
		if (p->func_flgs & FUNC_FLG_TPA)
			tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

		/* set rss flags */
		if (p->func_flgs & FUNC_FLG_RSS) {
			u16 rss_flgs = (p->rss->mode <<
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

			if (p->rss->cap & RSS_IPV4_CAP)
				rss_flgs |= RSS_IPV4_CAP_MASK;
			if (p->rss->cap & RSS_IPV4_TCP_CAP)
				rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_CAP)
				rss_flgs |= RSS_IPV6_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_TCP_CAP)
				rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

			tcfg.config_flags |= rss_flgs;
			tcfg.rss_result_mask = p->rss->result_mask;

		}

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp)
{
	u16 flags = 0;

	/* calculate queue flags */
	flags |= QUEUE_FLG_CACHE_ALIGN;
	flags |= QUEUE_FLG_HC;
	flags |= IS_E1HMF(bp) ? QUEUE_FLG_OV : 0;

#ifdef BCM_VLAN
	flags |= QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
#endif

	if (!fp->disable_tpa)
		flags |= QUEUE_FLG_TPA;

	flags |= QUEUE_FLG_STATS;

	return flags;
}

static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
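
/*
 * hc_rate turns the coalescing interval into an events-per-second
 * figure for the FW: e.g. rx_ticks = 50 usec gives 1000000/50 = 20000
 * updates/sec, while rx_ticks = 0 disables the rate altogether. The
 * Tx-side helper below computes the same thing from tx_ticks.
 */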

static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
{
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	txq_init->flags = flags;
	txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	txq_init->dscr_map = fp->tx_desc_mapping;
	txq_init->stat_id = fp->cl_id;
	txq_init->cid = HW_CID(bp, fp->cid);
	txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;
	txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}

void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->e1hov, BP_FUNC(bp));

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (is_eth_multi(bp))
	 *	flags |= FUNC_FLG_RSS;
	 */

	/* function setup */
	if (flags & FUNC_FLG_RSS) {
		rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
			   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
		rss.mode = bp->multi_mode;
		rss.result_mask = MULTI_MASK;
		func_init.rss = &rss;
	}

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	   Congestion management values depend on the link rate.
	   There is no active link, so the initial link rate is set to 10 Gbps.
	   When the link comes up, the congestion management values are
	   re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}


static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}
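
/*
 * The helper above hands out the slot the caller is about to fill and
 * then advances the producer, wrapping both the BD pointer and
 * spq_prod_idx back to the ring base after the last BD - a classic
 * producer walk of 0, 1, ..., last, 0, 1, ...
 */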

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 *	FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 *	TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
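
/*
 * A hypothetical post of the per-function statistics query, just to
 * show the calling convention (the data_hi/data_lo pair is whatever
 * DMA address the specific ramrod consumes, "addr" here being a
 * placeholder; common = 1 selects NONE_CONNECTION_TYPE above):
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
 *		      U64_HI(addr), U64_LO(addr), 1);
 */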

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
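
/*
 * The lock protocol above is set-and-verify on bit 31 of the GRC
 * register at GRCBASE_MCP + 0x9c: write the bit, read it back, and
 * proceed only if the bit is actually seen set; release is a plain
 * write of 0. The acquire loop retries for up to 1000 * 5 ms = 5 s.
 */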

#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: reading the indices must complete before handling */
	barrier();
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact OEM Support for assistance\n");
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config =
				MF_CFG_RD(bp, func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << 16);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
bool bnx2x_reset_is_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & RESET_DONE_FLAG_MASK) ? false : true;
}

/*
 * should be run under rtnl lock
 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}

/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}

static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
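
/*
 * Layout of BNX2X_MISC_GEN_REG as used by the helpers above: the low
 * LOAD_COUNTER_BITS (16) hold the load counter and bit 16 is the
 * reset-in-progress flag. E.g. a value of 0x00010003 means three
 * loaded functions with a recovery reset still pending, so
 * bnx2x_reset_is_done() reports false for it.
 */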

static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}

static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
				_print_next_block(par_num++, "BRB");
				break;
			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
				_print_next_block(par_num++, "PARSER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
				_print_next_block(par_num++, "TSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
				_print_next_block(par_num++, "SEARCHER");
				break;
			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "TSEMI");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
				_print_next_block(par_num++, "MCP ROM");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
				_print_next_block(par_num++, "MCP UMP RX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
				_print_next_block(par_num++, "MCP UMP TX");
				break;
			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
				_print_next_block(par_num++, "MCP SCPAD");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}

static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}

bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
					attn.sig[3]);
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other functions would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
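
/*
 * The bit algebra above, one truth-table row per attention bit:
 *	newly raised:  bits=1 ack=0 state=0  ->  asserted
 *	newly cleared: bits=0 ack=1 state=1  ->  deasserted
 * everything else is steady state; the sanity check fires when a bit
 * agrees between bits and ack yet disagrees with our attn_state.
 */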

static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}

#ifdef BCM_CNIC
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif

static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page element we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->spq_left %u\n",
	   hw_cons, sw_cons, bp->spq_left);

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
#endif
			bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;
		}

		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	bp->spq_left++;

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3373
a2fbb9ea
ET
3374static void bnx2x_sp_task(struct work_struct *work)
3375{
1cf167f2 3376 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3377 u16 status;
3378
3379 /* Return here if interrupt is disabled */
3380 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3381 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3382 return;
3383 }
3384
3385 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3386/* if (status == 0) */
3387/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3388
cdaa7cb8 3389 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3390
877e9aa4 3391 /* HW attentions */
523224a3 3392 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3393 bnx2x_attn_int(bp);
523224a3 3394 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3395 }
3396
523224a3
DK
3397 /* SP events: STAT_QUERY and others */
3398 if (status & BNX2X_DEF_SB_IDX) {
3399
3400 /* Handle EQ completions */
3401 bnx2x_eq_int(bp);
3402
3403 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3404 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3405
3406 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3407 }
3408
3409 if (unlikely(status))
3410 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3411 status);
a2fbb9ea 3412
523224a3
DK
3413 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3414 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3415}
3416
9f6c9258 3417irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3418{
3419 struct net_device *dev = dev_instance;
3420 struct bnx2x *bp = netdev_priv(dev);
3421
3422 /* Return here if interrupt is disabled */
3423 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3424 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3425 return IRQ_HANDLED;
3426 }
3427
523224a3
DK
3428 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3429 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3430
3431#ifdef BNX2X_STOP_ON_ERROR
3432 if (unlikely(bp->panic))
3433 return IRQ_HANDLED;
3434#endif
3435
993ac7b5
MC
3436#ifdef BCM_CNIC
3437 {
3438 struct cnic_ops *c_ops;
3439
3440 rcu_read_lock();
3441 c_ops = rcu_dereference(bp->cnic_ops);
3442 if (c_ops)
3443 c_ops->cnic_handler(bp->cnic_data, NULL);
3444 rcu_read_unlock();
3445 }
3446#endif
1cf167f2 3447 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3448
3449 return IRQ_HANDLED;
3450}
3451
3452/* end of slow path */
3453
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between the driver pulse and the MCP response
		 * should be 1 (before the MCP response) or 0 (after it)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
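
/*
 * Illustrative sketch (not called by the driver): the heartbeat check in
 * bnx2x_timer() above accepts a pulse delta of 0 or 1, with the sequence
 * arithmetic wrapping at MCP_PULSE_SEQ_MASK. A hypothetical predicate
 * making that explicit would be:
 */
static inline int bnx2x_pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	/* in sync: MCP has echoed our pulse, or is exactly one step behind */
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}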

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;

	if (!(len % 4) && !(addr % 4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);
}
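
/*
 * Usage note (illustrative): bnx2x_fill() takes the 32-bit path only when
 * both 'addr' and 'len' are dword aligned; any misalignment falls back to
 * byte-wide writes. E.g. zeroing a 16-byte, dword-aligned region issues
 * four REG_WR()s:
 *
 *	bnx2x_fill(bp, BAR_CSTRORM_INTMEM + offset, 0, 16);
 *
 * where 'offset' stands for any dword-aligned intmem offset.
 */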

/* helper: writes FP SP data to FW - data_size in dwords */
static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				       int fw_sb_id,
				       u32 *sb_data_p,
				       u32 data_size)
{
	int index;

	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		       sizeof(u32)*index,
		       *(sb_data_p + index));
}

static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	memset(&sb_data_e1x, 0,
	       sizeof(struct hc_status_block_data_e1x));
	sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
	sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
	sb_data_e1x.common.p_func.vf_valid = false;
	sb_data_p = (u32 *)&sb_data_e1x;
	data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);

	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_SYNC_BLOCK_SIZE);
}

/* helper: writes SP SB data to FW */
static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
		struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		       i*sizeof(u32),
		       *((u32 *)sp_sb_data + i));
}

static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;

	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_SYNC_BLOCK_SIZE);
}

static inline
void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
				    int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}

void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		   u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	igu_seg_id = HC_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	memset(&sb_data_e1x, 0,
	       sizeof(struct hc_status_block_data_e1x));
	sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
	sb_data_e1x.common.p_func.vf_id = 0xff;
	sb_data_e1x.common.p_func.vf_valid = false;
	sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp);
	sb_data_e1x.common.same_igu_sb_1b = true;
	sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
	sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
	hc_sm_p = sb_data_e1x.common.state_machine;
	hc_index_p = sb_data_e1x.index_data;
	sb_data_p = (u32 *)&sb_data_e1x;
	data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
					   u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
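
/*
 * Note (illustrative): a zero 'usec' value disables coalescing for the
 * given index even when 'disable' is not set; e.g.
 *
 *	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, idx, 0, 0);
 *
 * writes ticks = 0 and disable = 1 for that status block index.
 */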

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
				       false, tx_usec);
}

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;

	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	igu_sp_sb_index = DEF_SB_IGU_ID;
	igu_seg_id = HC_SEG_ACCESS_DEF;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;

		/* take care of sig[0]..sig[3] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);
	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_E1HVN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;

	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}
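
/*
 * Layout sketch (illustrative): the loop above chains the EQ pages into a
 * ring by pointing the last descriptor of each page at the next page, with
 * the last page wrapping back to page 0:
 *
 *	page 0 [desc 0 .. EQ_DESC_CNT_PAGE-2 | next -> page 1]
 *	...
 *	page NUM_EQ_PAGES-1 [ ...            | next -> page 0]
 */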

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	u16 cl_id;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		break;

	case BNX2X_RX_MODE_NORMAL:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_MULTICAST);
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		break;

	case BNX2X_RX_MODE_PROMISC:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);

		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			     NIG_REG_LLH0_BRB1_DRV_MASK,
	       llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
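
/*
 * Note (illustrative): the storm MAC filters and the NIG llh_mask act
 * together; the switch above programs per-client accept flags, while the
 * LLH mask decides which packet classes (bcast/mcast/VLAN/no-VLAN, plus
 * unicast in promiscuous mode) the NIG passes up to the host at all.
 */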

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent mode we write 0 here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->e1hmf);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->e1hmf);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->e1hmf);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->e1hmf);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			  BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset =
		USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
			   "cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}

void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
			    " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
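
/*
 * Lifecycle sketch (illustrative, error handling elided): the gunzip
 * helpers wrap firmware loading as init -> inflate -> end; 'fw_blob' and
 * 'fw_len' below are hypothetical names for a firmware image and its size:
 *
 *	if (bnx2x_gunzip_init(bp))
 *		return -ENOMEM;
 *	rc = bnx2x_gunzip(bp, fw_blob, fw_len);
 *	if (rc == 0)
 *		... consume bp->gunzip_buf (bp->gunzip_outlen dwords) ...
 *	bnx2x_gunzip_end(bp);
 */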

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset NIG statistics here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);

	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
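
/*
 * Note (illustrative): the negative return codes above identify the stage
 * that failed: -1/-3 a NIG byte-count timeout, -2 a PRS packet-count
 * timeout, -4 a NIG EOP FIFO that would not drain.
 */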

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},		/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bits 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bits 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base,
				      bp->common.shmem2_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		   set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);
#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (IS_E1HMF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_hc_status_block_e1x));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);
#ifdef BCM_CNIC

	BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_hc_status_block_e1x));
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;
	void *p;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(p,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		bnx2x_fp(bp, i, status_blk.e1x_sb) =
			(struct host_hc_status_block_e1x *)p;

		bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
			(bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
		bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
			(bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
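/* Post a COMMON FUNCTION_START/STOP ramrod on the slowpath queue and
 * wait until the firmware completion moves bp->state accordingly.
 */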
int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC address in the CAM for a few L2 clients of an E1x chip.
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 * @param is_bcast	is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	/* Each call configures a single CAM entry (config_table[0]);
	 * a broadcast address is flagged on that entry rather than
	 * added as a second one.
	 */
	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
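/* Busy-wait (or service the EQ/RX rings directly when polling with
 * interrupts off) until *state_p reaches the requested state;
 * returns -EBUSY on timeout and -EIO if the driver has paniced.
 */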
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
}

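/* Program the device's primary MAC into the CAM; on E1 the broadcast
 * address occupies the following CAM entry and is set here as well.
 */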
void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}
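/* Build the E1 multicast CAM configuration from the net_device MC
 * list, invalidating any leftover entries from a previously longer
 * list before posting the SET_MAC ramrod.
 */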
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

static void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);
	return 0;
}
#endif

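/* Translate the queue/client parameters gathered by the caller into
 * the client_init_ramrod_data layout the firmware expects.
 */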
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}

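/* Write the CDU validation bytes into the connection context so the
 * hardware can validate accesses to this context for the given CID.
 */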
static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

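/* Bring up a firmware client: program host coalescing for both
 * rings, set context validation, zero the statistics and post the
 * CLIENT_SETUP ramrod, then wait for its completion.
 */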
int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock, so no explicit memory
	 * barrier other than mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();

	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}

void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

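/* Lay out this function's ILT lines among the CDU, QM, SRC and TM
 * clients; SRC and TM are only backed when CNIC support is built in.
 */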
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));
#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));
#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}

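/* Open an L2 client on the given fastpath: enable its IGU status
 * block and issue the setup ramrod with freshly prepared Rx/Tx
 * queue parameters.
 */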
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
		     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
				   bnx2x_sp(bp, client_init_data),
				   bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}

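/* Tear down a firmware client with the HALT -> TERMINATE -> CFC_DEL
 * ramrod sequence, waiting for each stage to complete.
 */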
int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
		      p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
		      p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}

static int bnx2x_stop_client(struct bnx2x *bp, int index)
{
	struct bnx2x_client_ramrod_params client_stop = {0};
	struct bnx2x_fastpath *fp = &bp->fp[index];

	client_stop.index = index;
	client_stop.cid = fp->cid;
	client_stop.cl_id = fp->cl_id;
	client_stop.pstate = &(fp->state);
	client_stop.poll = 0;

	return bnx2x_stop_fw_client(bp, &client_stop);
}
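/* Quiesce this PCI function in the firmware: disable it in every
 * storm, mark its status blocks as disabled and clear its ILT range.
 */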
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			offsetof(struct hc_status_block_data_e1x, common);
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);

	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);

	bp->dmae_ready = 0;
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

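/* Graceful unload path: drain the Tx queues, remove MAC filters,
 * close all client connections, negotiate the reset level with the
 * MCP and finally reset the chip.
 */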
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* invalidate mc list,
		 * wait and poll (interrupts are off)
		 */
		bnx2x_invalidate_e1_mc_list(bp);
		bnx2x_set_eth_mac(bp, 0);

	} else {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections;
	   completions for ramrods are collected in a synchronous way */
	for_each_queue(bp, i)
		if (bnx2x_stop_client(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
			return;
#else
			goto unload_error;
#endif

	rc = bnx2x_func_stop(bp);
	if (rc) {
		BNX2X_ERR("Function stop failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}
#ifndef BNX2X_STOP_ON_ERROR
unload_error:
#endif
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}

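/* Disable the HW "close the gates" protection in the AEU masks so
 * that subsequent loads are not blocked after a parity event.
 */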
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp		driver handle
 * @param magic_val	Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	/* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
	   SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val	Old value of 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}

static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}

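/* The "process kill" recovery: wait for the chip to become idle,
 * isolate it from the host, reset everything except the cores listed
 * above bnx2x_process_kill_chip_reset() and bring the MCP back up.
 */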
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare for chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}

static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

/* Assumption: runs under rtnl lock. This, together with the fact
 * that it's called only from bnx2x_reset_task(), ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 *  update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR "%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock,
					 * since a former leader may have
					 * been unloaded by the user or have
					 * released leadership for another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/*
 * Init service functions
 */

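/* Map a function index to its PXP2 "pretend" register, which lets
 * one function temporarily issue GRC accesses as another.
 */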
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

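/* If a pre-boot UNDI driver left the device initialized, unload it
 * cleanly on both ports and reset the chip before we take over.
 */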
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
						 DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config. "
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
6905
34f80b04 6906static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6907{
a22f0788
YR
6908 u32 link_config, idx, cfg_size = 0;
6909 bp->port.advertising[0] = 0;
6910 bp->port.advertising[1] = 0;
6911 switch (bp->link_params.num_phys) {
6912 case 1:
6913 case 2:
6914 cfg_size = 1;
6915 break;
6916 case 3:
6917 cfg_size = 2;
6918 break;
6919 }
6920 for (idx = 0; idx < cfg_size; idx++) {
6921 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6922 link_config = bp->port.link_config[idx];
6923 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6924 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
6925 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6926 bp->link_params.req_line_speed[idx] =
6927 SPEED_AUTO_NEG;
6928 bp->port.advertising[idx] |=
6929 bp->port.supported[idx];
a2fbb9ea 6930 } else {
a22f0788
YR
6931 /* force 10G, no AN */
6932 bp->link_params.req_line_speed[idx] =
6933 SPEED_10000;
6934 bp->port.advertising[idx] |=
6935 (ADVERTISED_10000baseT_Full |
a2fbb9ea 6936 ADVERTISED_FIBRE);
a22f0788 6937 continue;
a2fbb9ea
ET
6938 }
6939 break;
6940
6941 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
6942 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6943 bp->link_params.req_line_speed[idx] =
6944 SPEED_10;
6945 bp->port.advertising[idx] |=
6946 (ADVERTISED_10baseT_Full |
34f80b04 6947 ADVERTISED_TP);
a2fbb9ea 6948 } else {
cdaa7cb8
VZ
6949 BNX2X_ERROR("NVRAM config error. "
6950 "Invalid link_config 0x%x"
6951 " speed_cap_mask 0x%x\n",
a22f0788
YR
6952 link_config,
6953 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6954 return;
6955 }
6956 break;
6957
6958 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
6959 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6960 bp->link_params.req_line_speed[idx] =
6961 SPEED_10;
6962 bp->link_params.req_duplex[idx] =
6963 DUPLEX_HALF;
6964 bp->port.advertising[idx] |=
6965 (ADVERTISED_10baseT_Half |
34f80b04 6966 ADVERTISED_TP);
a2fbb9ea 6967 } else {
cdaa7cb8
VZ
6968 BNX2X_ERROR("NVRAM config error. "
6969 "Invalid link_config 0x%x"
6970 " speed_cap_mask 0x%x\n",
a22f0788
YR
6971 link_config,
6972 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6973 return;
6974 }
6975 break;
6976
6977 case PORT_FEATURE_LINK_SPEED_100M_FULL:
a22f0788
YR
6978 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
6979 bp->link_params.req_line_speed[idx] =
6980 SPEED_100;
6981 bp->port.advertising[idx] |=
6982 (ADVERTISED_100baseT_Full |
34f80b04 6983 ADVERTISED_TP);
a2fbb9ea 6984 } else {
cdaa7cb8
VZ
6985 BNX2X_ERROR("NVRAM config error. "
6986 "Invalid link_config 0x%x"
6987 " speed_cap_mask 0x%x\n",
a22f0788
YR
6988 link_config,
6989 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
6990 return;
6991 }
6992 break;
6993
6994 case PORT_FEATURE_LINK_SPEED_100M_HALF:
a22f0788
YR
6995 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
6996 bp->link_params.req_line_speed[idx] = SPEED_100;
6997 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
6998 bp->port.advertising[idx] |=
6999 (ADVERTISED_100baseT_Half |
34f80b04 7000 ADVERTISED_TP);
a2fbb9ea 7001 } else {
cdaa7cb8
VZ
7002 BNX2X_ERROR("NVRAM config error. "
7003 "Invalid link_config 0x%x"
7004 " speed_cap_mask 0x%x\n",
a22f0788
YR
7005 link_config,
7006 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
7007 return;
7008 }
7009 break;
7010
7011 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
7012 if (bp->port.supported[idx] &
7013 SUPPORTED_1000baseT_Full) {
7014 bp->link_params.req_line_speed[idx] =
7015 SPEED_1000;
7016 bp->port.advertising[idx] |=
7017 (ADVERTISED_1000baseT_Full |
34f80b04 7018 ADVERTISED_TP);
a2fbb9ea 7019 } else {
cdaa7cb8
VZ
7020 BNX2X_ERROR("NVRAM config error. "
7021 "Invalid link_config 0x%x"
7022 " speed_cap_mask 0x%x\n",
a22f0788
YR
7023 link_config,
7024 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
7025 return;
7026 }
7027 break;
7028
7029 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
7030 if (bp->port.supported[idx] &
7031 SUPPORTED_2500baseX_Full) {
7032 bp->link_params.req_line_speed[idx] =
7033 SPEED_2500;
7034 bp->port.advertising[idx] |=
7035 (ADVERTISED_2500baseX_Full |
34f80b04 7036 ADVERTISED_TP);
a2fbb9ea 7037 } else {
cdaa7cb8
VZ
7038 BNX2X_ERROR("NVRAM config error. "
7039 "Invalid link_config 0x%x"
7040 " speed_cap_mask 0x%x\n",
a22f0788
YR
7041 link_config,
7042 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
7043 return;
7044 }
7045 break;
7046
7047 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7048 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7049 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
7050 if (bp->port.supported[idx] &
7051 SUPPORTED_10000baseT_Full) {
7052 bp->link_params.req_line_speed[idx] =
7053 SPEED_10000;
7054 bp->port.advertising[idx] |=
7055 (ADVERTISED_10000baseT_Full |
34f80b04 7056 ADVERTISED_FIBRE);
a2fbb9ea 7057 } else {
cdaa7cb8
VZ
7058 BNX2X_ERROR("NVRAM config error. "
7059 "Invalid link_config 0x%x"
7060 " speed_cap_mask 0x%x\n",
a22f0788
YR
7061 link_config,
7062 bp->link_params.speed_cap_mask[idx]);
a2fbb9ea
ET
7063 return;
7064 }
7065 break;
7066
7067 default:
cdaa7cb8
VZ
7068 BNX2X_ERROR("NVRAM config error. "
7069 "BAD link speed link_config 0x%x\n",
a22f0788
YR
7070 link_config);
7071 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
7072 bp->port.advertising[idx] = bp->port.supported[idx];
a2fbb9ea
ET
7073 break;
7074 }
a2fbb9ea 7075
a22f0788 7076 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 7077 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
7078 if ((bp->link_params.req_flow_ctrl[idx] ==
7079 BNX2X_FLOW_CTRL_AUTO) &&
7080 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
7081 bp->link_params.req_flow_ctrl[idx] =
7082 BNX2X_FLOW_CTRL_NONE;
7083 }
a2fbb9ea 7084
a22f0788
YR
7085 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
7086 " 0x%x advertising 0x%x\n",
7087 bp->link_params.req_line_speed[idx],
7088 bp->link_params.req_duplex[idx],
7089 bp->link_params.req_flow_ctrl[idx],
7090 bp->port.advertising[idx]);
7091 }
a2fbb9ea
ET
7092}
7093
e665bfda
MC
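/* Assemble a MAC address from the two shmem words: mac_hi holds the two
 * most-significant bytes and mac_lo the remaining four; both are
 * converted to network (big-endian) order before being copied out.
 */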
7094static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7095{
7096 mac_hi = cpu_to_be16(mac_hi);
7097 mac_lo = cpu_to_be32(mac_lo);
7098 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7099 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7100}
7101
34f80b04 7102static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7103{
34f80b04
EG
7104 int port = BP_PORT(bp);
7105 u32 val, val2;
589abe3a 7106 u32 config;
b7737c9b 7107 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 7108
c18487ee 7109 bp->link_params.bp = bp;
34f80b04 7110 bp->link_params.port = port;
c18487ee 7111
c18487ee 7112 bp->link_params.lane_config =
a2fbb9ea 7113 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 7114
a22f0788 7115 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
7116 SHMEM_RD(bp,
7117 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
7118 bp->link_params.speed_cap_mask[1] =
7119 SHMEM_RD(bp,
7120 dev_info.port_hw_config[port].speed_capability_mask2);
7121 bp->port.link_config[0] =
a2fbb9ea
ET
7122 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7123
a22f0788
YR
7124 bp->port.link_config[1] =
7125 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 7126
a22f0788
YR
7127 bp->link_params.multi_phy_config =
7128 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
7129 /* If the device is capable of WoL, set the default state according
7130 * to the HW
7131 */
4d295db0 7132 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
7133 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
7134 (config & PORT_FEATURE_WOL_ENABLED));
7135
b7737c9b 7136 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 7137 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 7138 bp->link_params.lane_config,
a22f0788
YR
7139 bp->link_params.speed_cap_mask[0],
7140 bp->port.link_config[0]);
a2fbb9ea 7141
a22f0788 7142 bp->link_params.switch_cfg = (bp->port.link_config[0] &
4d295db0 7143 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 7144 bnx2x_phy_probe(&bp->link_params);
c18487ee 7145 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7146
7147 bnx2x_link_settings_requested(bp);
7148
01cd4528
EG
7149 /*
7150 * If connected directly, work with the internal PHY, otherwise, work
7151 * with the external PHY
7152 */
b7737c9b
YR
7153 ext_phy_config =
7154 SHMEM_RD(bp,
7155 dev_info.port_hw_config[port].external_phy_config);
7156 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 7157 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 7158 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
7159
7160 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
7161 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
7162 bp->mdio.prtad =
b7737c9b 7163 XGXS_EXT_PHY_ADDR(ext_phy_config);
01cd4528 7164
a2fbb9ea
ET
7165 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7166 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 7167 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
7168 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7169 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
7170
7171#ifdef BCM_CNIC
7172 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
7173 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
7174 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
7175#endif
34f80b04
EG
7176}
7177
7178static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7179{
7180 int func = BP_FUNC(bp);
7181 u32 val, val2;
7182 int rc = 0;
a2fbb9ea 7183
34f80b04 7184 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7185
523224a3
DK
7186 bp->common.int_block = INT_BLOCK_HC;
7187
7188 bp->igu_dsb_id = DEF_SB_IGU_ID;
7189 bp->igu_base_sb = 0;
7190 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
7191
34f80b04
EG
7192 bp->e1hov = 0;
7193 bp->e1hmf = 0;
2145a920 7194 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
523224a3
DK
7195
7196 bp->common.mf_cfg_base = bp->common.shmem_base +
7197 offsetof(struct shmem_region, func_mb) +
7198 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
34f80b04 7199 bp->mf_config =
523224a3 7200 MF_CFG_RD(bp, func_mf_config[func].config);
a2fbb9ea 7201
523224a3 7202 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 7203 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 7204 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 7205 bp->e1hmf = 1;
2691d51d
EG
7206 BNX2X_DEV_INFO("%s function mode\n",
7207 IS_E1HMF(bp) ? "multi" : "single");
7208
7209 if (IS_E1HMF(bp)) {
523224a3 7210 val = (MF_CFG_RD(bp, func_mf_config[func].
2691d51d
EG
7211 e1hov_tag) &
7212 FUNC_MF_CFG_E1HOV_TAG_MASK);
7213 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7214 bp->e1hov = val;
7215 BNX2X_DEV_INFO("E1HOV for func %d is %d "
7216 "(0x%04x)\n",
7217 func, bp->e1hov, bp->e1hov);
7218 } else {
cdaa7cb8
VZ
7219 BNX2X_ERROR("No valid E1HOV for func %d,"
7220 " aborting\n", func);
34f80b04
EG
7221 rc = -EPERM;
7222 }
2691d51d
EG
7223 } else {
7224 if (BP_E1HVN(bp)) {
cdaa7cb8
VZ
7225 BNX2X_ERROR("VN %d in single function mode,"
7226 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
7227 rc = -EPERM;
7228 }
34f80b04
EG
7229 }
7230 }
a2fbb9ea 7231
523224a3
DK
7232 /* adjust igu_sb_cnt to MF */
7233 if (IS_E1HMF(bp))
7234 bp->igu_sb_cnt /= E1HVN_MAX;
7235
34f80b04
EG
7236 if (!BP_NOMCP(bp)) {
7237 bnx2x_get_port_hwinfo(bp);
7238
7239 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7240 DRV_MSG_SEQ_NUMBER_MASK);
7241 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7242 }
7243
7244 if (IS_E1HMF(bp)) {
523224a3
DK
7245 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
7246 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
34f80b04
EG
7247 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7248 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7249 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7250 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7251 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7252 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7253 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7254 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7255 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7256 ETH_ALEN);
7257 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7258 ETH_ALEN);
a2fbb9ea 7259 }
34f80b04
EG
7260
7261 return rc;
a2fbb9ea
ET
7262 }
7263
34f80b04
EG
7264 if (BP_NOMCP(bp)) {
7265 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 7266 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
7267 random_ether_addr(bp->dev->dev_addr);
7268 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7269 }
a2fbb9ea 7270
34f80b04
EG
7271 return rc;
7272}
7273
34f24c7f
VZ
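/* Parse the PCI VPD read-only section. If the manufacturer ID keyword
 * matches Dell, the VENDOR0 keyword carries an OEM version string which
 * is copied into bp->fw_ver for reporting.
 */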
7274static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
7275{
7276 int cnt, i, block_end, rodi;
7277 char vpd_data[BNX2X_VPD_LEN+1];
7278 char str_id_reg[VENDOR_ID_LEN+1];
7279 char str_id_cap[VENDOR_ID_LEN+1];
7280 u8 len;
7281
7282 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
7283 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
7284
7285 if (cnt < BNX2X_VPD_LEN)
7286 goto out_not_found;
7287
7288 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
7289 PCI_VPD_LRDT_RO_DATA);
7290 if (i < 0)
7291 goto out_not_found;
7292
7293
7294 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
7295 pci_vpd_lrdt_size(&vpd_data[i]);
7296
7297 i += PCI_VPD_LRDT_TAG_SIZE;
7298
7299 if (block_end > BNX2X_VPD_LEN)
7300 goto out_not_found;
7301
7302 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
7303 PCI_VPD_RO_KEYWORD_MFR_ID);
7304 if (rodi < 0)
7305 goto out_not_found;
7306
7307 len = pci_vpd_info_field_size(&vpd_data[rodi]);
7308
7309 if (len != VENDOR_ID_LEN)
7310 goto out_not_found;
7311
7312 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
7313
7314 /* vendor specific info */
7315 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
7316 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
7317 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
7318 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
7319
7320 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
7321 PCI_VPD_RO_KEYWORD_VENDOR0);
7322 if (rodi >= 0) {
7323 len = pci_vpd_info_field_size(&vpd_data[rodi]);
7324
7325 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
7326
7327 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
7328 memcpy(bp->fw_ver, &vpd_data[rodi], len);
7329 bp->fw_ver[len] = ' ';
7330 }
7331 }
7332 return;
7333 }
7334out_not_found:
7335 return;
7336}
7337
34f80b04
EG
7338static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7339{
7340 int func = BP_FUNC(bp);
87942b46 7341 int timer_interval;
34f80b04
EG
7342 int rc;
7343
da5a662a
VZ
7344 /* Disable interrupt handling until HW is initialized */
7345 atomic_set(&bp->intr_sem, 1);
e1510706 7346 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 7347
34f80b04 7348 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 7349 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 7350 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
7351#ifdef BCM_CNIC
7352 mutex_init(&bp->cnic_mutex);
7353#endif
a2fbb9ea 7354
1cf167f2 7355 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 7356 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
7357
7358 rc = bnx2x_get_hwinfo(bp);
7359
523224a3
DK
7360 if (!rc)
7361 rc = bnx2x_alloc_mem_bp(bp);
7362
34f24c7f 7363 bnx2x_read_fwinfo(bp);
34f80b04
EG
7364 /* need to reset chip if undi was active */
7365 if (!BP_NOMCP(bp))
7366 bnx2x_undi_unload(bp);
7367
7368 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 7369 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
7370
7371 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
7372 dev_err(&bp->pdev->dev, "MCP disabled, "
7373 "must load devices in order!\n");
34f80b04 7374
555f6c78 7375 /* Set multi queue mode */
8badd27a
EG
7376 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7377 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
7378 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
7379 "requested is not MSI-X\n");
555f6c78
EG
7380 multi_mode = ETH_RSS_MODE_DISABLED;
7381 }
7382 bp->multi_mode = multi_mode;
5d7cd496 7383 bp->int_mode = int_mode;
555f6c78 7384
4fd89b7a
DK
7385 bp->dev->features |= NETIF_F_GRO;
7386
7a9b2557
VZ
7387 /* Set TPA flags */
7388 if (disable_tpa) {
7389 bp->flags &= ~TPA_ENABLE_FLAG;
7390 bp->dev->features &= ~NETIF_F_LRO;
7391 } else {
7392 bp->flags |= TPA_ENABLE_FLAG;
7393 bp->dev->features |= NETIF_F_LRO;
7394 }
5d7cd496 7395 bp->disable_tpa = disable_tpa;
7a9b2557 7396
a18f5128
EG
7397 if (CHIP_IS_E1(bp))
7398 bp->dropless_fc = 0;
7399 else
7400 bp->dropless_fc = dropless_fc;
7401
8d5726c4 7402 bp->mrrs = mrrs;
7a9b2557 7403
34f80b04 7404 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04
EG
7405
7406 bp->rx_csum = 1;
34f80b04 7407
7d323bfd 7408 /* make sure that the numbers are in the right granularity */
523224a3
DK
7409 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
7410 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 7411
87942b46
EG
7412 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7413 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
7414
7415 init_timer(&bp->timer);
7416 bp->timer.expires = jiffies + bp->current_interval;
7417 bp->timer.data = (unsigned long) bp;
7418 bp->timer.function = bnx2x_timer;
7419
7420 return rc;
a2fbb9ea
ET
7421}
7422
a2fbb9ea 7423
de0c62db
DK
7424/****************************************************************************
7425* General service functions
7426****************************************************************************/
a2fbb9ea 7427
bb2a0f7a 7428/* called with rtnl_lock */
a2fbb9ea
ET
7429static int bnx2x_open(struct net_device *dev)
7430{
7431 struct bnx2x *bp = netdev_priv(dev);
7432
6eccabb3
EG
7433 netif_carrier_off(dev);
7434
a2fbb9ea
ET
7435 bnx2x_set_power_state(bp, PCI_D0);
7436
72fd0718
VZ
7437 if (!bnx2x_reset_is_done(bp)) {
7438 do {
7439 /* Reset the MCP mailbox sequence if there is an
7440 * ongoing recovery
7441 */
7442 bp->fw_seq = 0;
7443
7444 /* If this is the first function to load and reset-done
7445 * is still not cleared, recovery may not have finished.
7446 * We don't check the attention state here because it may
7447 * have already been cleared by a "common" reset, but we
7448 * shall proceed with "process kill" anyway.
7449 */
7450 if ((bnx2x_get_load_cnt(bp) == 0) &&
7451 bnx2x_trylock_hw_lock(bp,
7452 HW_LOCK_RESOURCE_RESERVED_08) &&
7453 (!bnx2x_leader_reset(bp))) {
7454 DP(NETIF_MSG_HW, "Recovered in open\n");
7455 break;
7456 }
7457
7458 bnx2x_set_power_state(bp, PCI_D3hot);
7459
7460 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
7461 " completed yet. Try again later. If you still see this"
7462 " message after a few retries then a power cycle is"
7463 " required.\n", bp->dev->name);
7464
7465 return -EAGAIN;
7466 } while (0);
7467 }
7468
7469 bp->recovery_state = BNX2X_RECOVERY_DONE;
7470
bb2a0f7a 7471 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
7472}
7473
bb2a0f7a 7474/* called with rtnl_lock */
a2fbb9ea
ET
7475static int bnx2x_close(struct net_device *dev)
7476{
a2fbb9ea
ET
7477 struct bnx2x *bp = netdev_priv(dev);
7478
7479 /* Unload the driver, release IRQs */
bb2a0f7a 7480 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 7481 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
7482
7483 return 0;
7484}
7485
f5372251 7486/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 7487void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
7488{
7489 struct bnx2x *bp = netdev_priv(dev);
7490 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
7491 int port = BP_PORT(bp);
7492
7493 if (bp->state != BNX2X_STATE_OPEN) {
7494 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
7495 return;
7496 }
7497
7498 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
7499
7500 if (dev->flags & IFF_PROMISC)
7501 rx_mode = BNX2X_RX_MODE_PROMISC;
7502
7503 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
7504 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
7505 CHIP_IS_E1(bp)))
34f80b04
EG
7506 rx_mode = BNX2X_RX_MODE_ALLMULTI;
7507
7508 else { /* some multicasts */
7509 if (CHIP_IS_E1(bp)) {
523224a3
DK
7510 /*
7511 * set mc list, do not wait as wait implies sleep
7512 * and set_rx_mode can be invoked from non-sleepable
7513 * context
7514 */
7515 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
7516 BNX2X_MAX_EMUL_MULTI*(1 + port) :
7517 BNX2X_MAX_MULTICAST*(1 + port));
e665bfda 7518
523224a3 7519 bnx2x_set_e1_mc_list(bp, offset);
34f80b04
EG
7520 } else { /* E1H */
7521 /* Accept one or more multicasts */
22bedad3 7522 struct netdev_hw_addr *ha;
34f80b04
EG
7523 u32 mc_filter[MC_HASH_SIZE];
7524 u32 crc, bit, regidx;
7525 int i;
7526
7527 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
7528
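 /* Hash each multicast MAC with CRC32c: the top
 * 8 bits of the CRC select one of 256 bins spread
 * over eight 32-bit MC_HASH registers (bits 7:5
 * pick the register, bits 4:0 the bit within it).
 */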
22bedad3 7529 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 7530 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
523224a3 7531 bnx2x_mc_addr(ha));
34f80b04 7532
523224a3
DK
7533 crc = crc32c_le(0, bnx2x_mc_addr(ha),
7534 ETH_ALEN);
34f80b04
EG
7535 bit = (crc >> 24) & 0xff;
7536 regidx = bit >> 5;
7537 bit &= 0x1f;
7538 mc_filter[regidx] |= (1 << bit);
7539 }
7540
7541 for (i = 0; i < MC_HASH_SIZE; i++)
7542 REG_WR(bp, MC_HASH_OFFSET(bp, i),
7543 mc_filter[i]);
7544 }
7545 }
7546
523224a3 7547
34f80b04
EG
7548 bp->rx_mode = rx_mode;
7549 bnx2x_set_storm_rx_mode(bp);
7550}
7551
a2fbb9ea 7552
c18487ee 7553/* called with rtnl_lock */
01cd4528
EG
7554static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
7555 int devad, u16 addr)
a2fbb9ea 7556{
01cd4528
EG
7557 struct bnx2x *bp = netdev_priv(netdev);
7558 u16 value;
7559 int rc;
a2fbb9ea 7560
01cd4528
EG
7561 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
7562 prtad, devad, addr);
a2fbb9ea 7563
01cd4528
EG
7564 /* The HW expects different devad if CL22 is used */
7565 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 7566
01cd4528 7567 bnx2x_acquire_phy_lock(bp);
e10bc84d 7568 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
7569 bnx2x_release_phy_lock(bp);
7570 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 7571
01cd4528
EG
7572 if (!rc)
7573 rc = value;
7574 return rc;
7575}
a2fbb9ea 7576
01cd4528
EG
7577/* called with rtnl_lock */
7578static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7579 u16 addr, u16 value)
7580{
7581 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
7582 int rc;
7583
7584 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7585 " value 0x%x\n", prtad, devad, addr, value);
7586
01cd4528
EG
7587 /* The HW expects different devad if CL22 is used */
7588 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 7589
01cd4528 7590 bnx2x_acquire_phy_lock(bp);
e10bc84d 7591 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
7592 bnx2x_release_phy_lock(bp);
7593 return rc;
7594}
c18487ee 7595
01cd4528
EG
7596/* called with rtnl_lock */
7597static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7598{
7599 struct bnx2x *bp = netdev_priv(dev);
7600 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 7601
01cd4528
EG
7602 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7603 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 7604
01cd4528
EG
7605 if (!netif_running(dev))
7606 return -EAGAIN;
7607
7608 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
7609}
7610
257ddbda 7611#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
7612static void poll_bnx2x(struct net_device *dev)
7613{
7614 struct bnx2x *bp = netdev_priv(dev);
7615
7616 disable_irq(bp->pdev->irq);
7617 bnx2x_interrupt(bp->pdev->irq, dev);
7618 enable_irq(bp->pdev->irq);
7619}
7620#endif
7621
c64213cd
SH
7622static const struct net_device_ops bnx2x_netdev_ops = {
7623 .ndo_open = bnx2x_open,
7624 .ndo_stop = bnx2x_close,
7625 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 7626 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
7627 .ndo_set_mac_address = bnx2x_change_mac_addr,
7628 .ndo_validate_addr = eth_validate_addr,
7629 .ndo_do_ioctl = bnx2x_ioctl,
7630 .ndo_change_mtu = bnx2x_change_mtu,
7631 .ndo_tx_timeout = bnx2x_tx_timeout,
7632#ifdef BCM_VLAN
7633 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
7634#endif
257ddbda 7635#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
7636 .ndo_poll_controller = poll_bnx2x,
7637#endif
7638};
7639
34f80b04
EG
7640static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7641 struct net_device *dev)
a2fbb9ea
ET
7642{
7643 struct bnx2x *bp;
7644 int rc;
7645
7646 SET_NETDEV_DEV(dev, &pdev->dev);
7647 bp = netdev_priv(dev);
7648
34f80b04
EG
7649 bp->dev = dev;
7650 bp->pdev = pdev;
a2fbb9ea 7651 bp->flags = 0;
34f80b04 7652 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
7653
7654 rc = pci_enable_device(pdev);
7655 if (rc) {
cdaa7cb8
VZ
7656 dev_err(&bp->pdev->dev,
7657 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
7658 goto err_out;
7659 }
7660
7661 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
7662 dev_err(&bp->pdev->dev,
7663 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
7664 rc = -ENODEV;
7665 goto err_out_disable;
7666 }
7667
7668 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
7669 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7670 " base address, aborting\n");
a2fbb9ea
ET
7671 rc = -ENODEV;
7672 goto err_out_disable;
7673 }
7674
34f80b04
EG
7675 if (atomic_read(&pdev->enable_cnt) == 1) {
7676 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7677 if (rc) {
cdaa7cb8
VZ
7678 dev_err(&bp->pdev->dev,
7679 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
7680 goto err_out_disable;
7681 }
a2fbb9ea 7682
34f80b04
EG
7683 pci_set_master(pdev);
7684 pci_save_state(pdev);
7685 }
a2fbb9ea
ET
7686
7687 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7688 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
7689 dev_err(&bp->pdev->dev,
7690 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
7691 rc = -EIO;
7692 goto err_out_release;
7693 }
7694
7695 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7696 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
7697 dev_err(&bp->pdev->dev,
7698 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
7699 rc = -EIO;
7700 goto err_out_release;
7701 }
7702
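 /* Prefer a 64-bit DMA mask (DAC); the coherent mask must match.
 * Otherwise fall back to a 32-bit mask, and fail if even that is
 * not supported.
 */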
1a983142 7703 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 7704 bp->flags |= USING_DAC_FLAG;
1a983142 7705 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
7706 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7707 " failed, aborting\n");
a2fbb9ea
ET
7708 rc = -EIO;
7709 goto err_out_release;
7710 }
7711
1a983142 7712 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
7713 dev_err(&bp->pdev->dev,
7714 "System does not support DMA, aborting\n");
a2fbb9ea
ET
7715 rc = -EIO;
7716 goto err_out_release;
7717 }
7718
34f80b04
EG
7719 dev->mem_start = pci_resource_start(pdev, 0);
7720 dev->base_addr = dev->mem_start;
7721 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
7722
7723 dev->irq = pdev->irq;
7724
275f165f 7725 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 7726 if (!bp->regview) {
cdaa7cb8
VZ
7727 dev_err(&bp->pdev->dev,
7728 "Cannot map register space, aborting\n");
a2fbb9ea
ET
7729 rc = -ENOMEM;
7730 goto err_out_release;
7731 }
7732
34f80b04 7733 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 7734 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 7735 pci_resource_len(pdev, 2)));
a2fbb9ea 7736 if (!bp->doorbells) {
cdaa7cb8
VZ
7737 dev_err(&bp->pdev->dev,
7738 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
7739 rc = -ENOMEM;
7740 goto err_out_unmap;
7741 }
7742
7743 bnx2x_set_power_state(bp, PCI_D0);
7744
34f80b04
EG
7745 /* clean indirect addresses */
7746 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7747 PCICFG_VENDOR_ID_OFFSET);
7748 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7749 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7750 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7751 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 7752
72fd0718
VZ
7753 /* Reset the load counter */
7754 bnx2x_clear_load_cnt(bp);
7755
34f80b04 7756 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 7757
c64213cd 7758 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 7759 bnx2x_set_ethtool_ops(dev);
34f80b04
EG
7760 dev->features |= NETIF_F_SG;
7761 dev->features |= NETIF_F_HW_CSUM;
7762 if (bp->flags & USING_DAC_FLAG)
7763 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
7764 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7765 dev->features |= NETIF_F_TSO6;
34f80b04
EG
7766#ifdef BCM_VLAN
7767 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 7768 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
7769
7770 dev->vlan_features |= NETIF_F_SG;
7771 dev->vlan_features |= NETIF_F_HW_CSUM;
7772 if (bp->flags & USING_DAC_FLAG)
7773 dev->vlan_features |= NETIF_F_HIGHDMA;
7774 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7775 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 7776#endif
a2fbb9ea 7777
01cd4528
EG
7778 /* get_port_hwinfo() will set prtad and mmds properly */
7779 bp->mdio.prtad = MDIO_PRTAD_NONE;
7780 bp->mdio.mmds = 0;
7781 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7782 bp->mdio.dev = dev;
7783 bp->mdio.mdio_read = bnx2x_mdio_read;
7784 bp->mdio.mdio_write = bnx2x_mdio_write;
7785
a2fbb9ea
ET
7786 return 0;
7787
7788err_out_unmap:
7789 if (bp->regview) {
7790 iounmap(bp->regview);
7791 bp->regview = NULL;
7792 }
a2fbb9ea
ET
7793 if (bp->doorbells) {
7794 iounmap(bp->doorbells);
7795 bp->doorbells = NULL;
7796 }
7797
7798err_out_release:
34f80b04
EG
7799 if (atomic_read(&pdev->enable_cnt) == 1)
7800 pci_release_regions(pdev);
a2fbb9ea
ET
7801
7802err_out_disable:
7803 pci_disable_device(pdev);
7804 pci_set_drvdata(pdev, NULL);
7805
7806err_out:
7807 return rc;
7808}
7809
37f9ce62
EG
7810static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7811 int *width, int *speed)
25047950
ET
7812{
7813 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7814
37f9ce62 7815 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 7816
37f9ce62
EG
7817 /* return value of 1=2.5GHz 2=5GHz */
7818 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 7819}
37f9ce62 7820
6891dd25 7821static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 7822{
37f9ce62 7823 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
7824 struct bnx2x_fw_file_hdr *fw_hdr;
7825 struct bnx2x_fw_file_section *sections;
94a78b79 7826 u32 offset, len, num_ops;
37f9ce62 7827 u16 *ops_offsets;
94a78b79 7828 int i;
37f9ce62 7829 const u8 *fw_ver;
94a78b79
VZ
7830
7831 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7832 return -EINVAL;
7833
7834 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7835 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7836
7837 /* Make sure none of the offsets and sizes make us read beyond
7838 * the end of the firmware data */
7839 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7840 offset = be32_to_cpu(sections[i].offset);
7841 len = be32_to_cpu(sections[i].len);
7842 if (offset + len > firmware->size) {
cdaa7cb8
VZ
7843 dev_err(&bp->pdev->dev,
7844 "Section %d length is out of bounds\n", i);
94a78b79
VZ
7845 return -EINVAL;
7846 }
7847 }
7848
7849 /* Likewise for the init_ops offsets */
7850 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7851 ops_offsets = (u16 *)(firmware->data + offset);
7852 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7853
7854 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7855 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
7856 dev_err(&bp->pdev->dev,
7857 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
7858 return -EINVAL;
7859 }
7860 }
7861
7862 /* Check FW version */
7863 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7864 fw_ver = firmware->data + offset;
7865 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7866 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7867 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7868 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
7869 dev_err(&bp->pdev->dev,
7870 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
7871 fw_ver[0], fw_ver[1], fw_ver[2],
7872 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7873 BCM_5710_FW_MINOR_VERSION,
7874 BCM_5710_FW_REVISION_VERSION,
7875 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 7876 return -EINVAL;
94a78b79
VZ
7877 }
7878
7879 return 0;
7880}
7881
ab6ad5a4 7882static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7883{
ab6ad5a4
EG
7884 const __be32 *source = (const __be32 *)_source;
7885 u32 *target = (u32 *)_target;
94a78b79 7886 u32 i;
94a78b79
VZ
7887
7888 for (i = 0; i < n/4; i++)
7889 target[i] = be32_to_cpu(source[i]);
7890}
7891
7892/*
7893 Ops array is stored in the following format:
7894 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7895 */
ab6ad5a4 7896static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 7897{
ab6ad5a4
EG
7898 const __be32 *source = (const __be32 *)_source;
7899 struct raw_op *target = (struct raw_op *)_target;
94a78b79 7900 u32 i, j, tmp;
94a78b79 7901
ab6ad5a4 7902 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
7903 tmp = be32_to_cpu(source[j]);
7904 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
7905 target[i].offset = tmp & 0xffffff;
7906 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
7907 }
7908}
ab6ad5a4 7909
523224a3
DK
7910/**
7911 * IRO array is stored in the following format:
7912 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
7913 */
7914static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
7915{
7916 const __be32 *source = (const __be32 *)_source;
7917 struct iro *target = (struct iro *)_target;
7918 u32 i, j, tmp;
7919
7920 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
7921 target[i].base = be32_to_cpu(source[j]);
7922 j++;
7923 tmp = be32_to_cpu(source[j]);
7924 target[i].m1 = (tmp >> 16) & 0xffff;
7925 target[i].m2 = tmp & 0xffff;
7926 j++;
7927 tmp = be32_to_cpu(source[j]);
7928 target[i].m3 = (tmp >> 16) & 0xffff;
7929 target[i].size = tmp & 0xffff;
7930 j++;
7931 }
7932}
7933
ab6ad5a4 7934static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 7935{
ab6ad5a4
EG
7936 const __be16 *source = (const __be16 *)_source;
7937 u16 *target = (u16 *)_target;
94a78b79 7938 u32 i;
94a78b79
VZ
7939
7940 for (i = 0; i < n/2; i++)
7941 target[i] = be16_to_cpu(source[i]);
7942}
7943
7995c64e
JP
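/* Allocate bp->arr and convert the corresponding firmware file section
 * from its big-endian on-file layout into CPU order using the supplied
 * conversion helper.
 */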
7944#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
7945do { \
7946 u32 len = be32_to_cpu(fw_hdr->arr.len); \
7947 bp->arr = kmalloc(len, GFP_KERNEL); \
7948 if (!bp->arr) { \
7949 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7950 goto lbl; \
7951 } \
7952 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
7953 (u8 *)bp->arr, len); \
7954} while (0)
94a78b79 7955
6891dd25 7956int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 7957{
45229b42 7958 const char *fw_file_name;
94a78b79 7959 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 7960 int rc;
94a78b79 7961
94a78b79 7962 if (CHIP_IS_E1(bp))
45229b42 7963 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 7964 else if (CHIP_IS_E1H(bp))
45229b42 7965 fw_file_name = FW_FILE_NAME_E1H;
cdaa7cb8 7966 else {
6891dd25 7967 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
7968 return -EINVAL;
7969 }
94a78b79 7970
6891dd25 7971 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 7972
6891dd25 7973 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 7974 if (rc) {
6891dd25 7975 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
7976 goto request_firmware_exit;
7977 }
7978
7979 rc = bnx2x_check_firmware(bp);
7980 if (rc) {
6891dd25 7981 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
7982 goto request_firmware_exit;
7983 }
7984
7985 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7986
7987 /* Initialize the pointers to the init arrays */
7988 /* Blob */
7989 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7990
7991 /* Opcodes */
7992 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7993
7994 /* Offsets */
ab6ad5a4
EG
7995 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7996 be16_to_cpu_n);
94a78b79
VZ
7997
7998 /* STORMs firmware */
573f2035
EG
7999 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8000 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8001 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8002 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8003 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8004 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8005 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8006 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8007 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8008 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8009 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8010 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8011 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8012 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8013 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8014 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
8015 /* IRO */
8016 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
8017
8018 return 0;
ab6ad5a4 8019
523224a3
DK
8020iro_alloc_err:
8021 kfree(bp->init_ops_offsets);
94a78b79
VZ
8022init_offsets_alloc_err:
8023 kfree(bp->init_ops);
8024init_ops_alloc_err:
8025 kfree(bp->init_data);
8026request_firmware_exit:
8027 release_firmware(bp->firmware);
8028
8029 return rc;
8030}
8031
523224a3
DK
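/* The QM block hands out connection IDs in QM_CID_ROUND-sized chunks,
 * so round the L2 CID count up (including the CNIC CIDs when CNIC
 * support is built in).
 */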
8032static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8033{
8034 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 8035
523224a3
DK
8036#ifdef BCM_CNIC
8037 cid_count += CNIC_CID_MAX;
8038#endif
8039 return roundup(cid_count, QM_CID_ROUND);
8040}
a2fbb9ea
ET
8041static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8042 const struct pci_device_id *ent)
8043{
a2fbb9ea
ET
8044 struct net_device *dev = NULL;
8045 struct bnx2x *bp;
37f9ce62 8046 int pcie_width, pcie_speed;
523224a3
DK
8047 int rc, cid_count;
8048
8049 cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE;
a2fbb9ea 8050
a2fbb9ea 8051 /* dev zeroed in init_etherdev */
523224a3 8052 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 8053 if (!dev) {
cdaa7cb8 8054 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 8055 return -ENOMEM;
34f80b04 8056 }
a2fbb9ea 8057
a2fbb9ea 8058 bp = netdev_priv(dev);
7995c64e 8059 bp->msg_enable = debug;
a2fbb9ea 8060
df4770de
EG
8061 pci_set_drvdata(pdev, dev);
8062
523224a3
DK
8063 bp->l2_cid_count = cid_count;
8064
34f80b04 8065 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
8066 if (rc < 0) {
8067 free_netdev(dev);
8068 return rc;
8069 }
8070
34f80b04 8071 rc = bnx2x_init_bp(bp);
693fc0d1
EG
8072 if (rc)
8073 goto init_one_exit;
8074
523224a3
DK
8075 /* calc qm_cid_count */
8076 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
8077
693fc0d1 8078 rc = register_netdev(dev);
34f80b04 8079 if (rc) {
693fc0d1 8080 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
8081 goto init_one_exit;
8082 }
8083
37f9ce62 8084 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
cdaa7cb8
VZ
8085 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
8086 " IRQ %d, ", board_info[ent->driver_data].name,
8087 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
8088 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
8089 dev->base_addr, bp->pdev->irq);
8090 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 8091
a2fbb9ea 8092 return 0;
34f80b04
EG
8093
8094init_one_exit:
8095 if (bp->regview)
8096 iounmap(bp->regview);
8097
8098 if (bp->doorbells)
8099 iounmap(bp->doorbells);
8100
8101 free_netdev(dev);
8102
8103 if (atomic_read(&pdev->enable_cnt) == 1)
8104 pci_release_regions(pdev);
8105
8106 pci_disable_device(pdev);
8107 pci_set_drvdata(pdev, NULL);
8108
8109 return rc;
a2fbb9ea
ET
8110}
8111
8112static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8113{
8114 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
8115 struct bnx2x *bp;
8116
8117 if (!dev) {
cdaa7cb8 8118 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
8119 return;
8120 }
228241eb 8121 bp = netdev_priv(dev);
a2fbb9ea 8122
a2fbb9ea
ET
8123 unregister_netdev(dev);
8124
72fd0718
VZ
8125 /* Make sure RESET task is not scheduled before continuing */
8126 cancel_delayed_work_sync(&bp->reset_task);
8127
a2fbb9ea
ET
8128 if (bp->regview)
8129 iounmap(bp->regview);
8130
8131 if (bp->doorbells)
8132 iounmap(bp->doorbells);
8133
523224a3
DK
8134 bnx2x_free_mem_bp(bp);
8135
a2fbb9ea 8136 free_netdev(dev);
34f80b04
EG
8137
8138 if (atomic_read(&pdev->enable_cnt) == 1)
8139 pci_release_regions(pdev);
8140
a2fbb9ea
ET
8141 pci_disable_device(pdev);
8142 pci_set_drvdata(pdev, NULL);
8143}
8144
f8ef6e44
YG
8145static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
8146{
8147 int i;
8148
8149 bp->state = BNX2X_STATE_ERROR;
8150
8151 bp->rx_mode = BNX2X_RX_MODE_NONE;
8152
8153 bnx2x_netif_stop(bp, 0);
c89af1a3 8154 netif_carrier_off(bp->dev);
f8ef6e44
YG
8155
8156 del_timer_sync(&bp->timer);
8157 bp->stats_state = STATS_STATE_DISABLED;
8158 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
8159
8160 /* Release IRQs */
6cbe5065 8161 bnx2x_free_irq(bp, false);
f8ef6e44 8162
f8ef6e44
YG
8163 /* Free SKBs, SGEs, TPA pool and driver internals */
8164 bnx2x_free_skbs(bp);
523224a3 8165
54b9ddaa 8166 for_each_queue(bp, i)
f8ef6e44 8167 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 8168 for_each_queue(bp, i)
7cde1c8b 8169 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
8170 bnx2x_free_mem(bp);
8171
8172 bp->state = BNX2X_STATE_CLOSED;
8173
f8ef6e44
YG
8174 return 0;
8175}
8176
8177static void bnx2x_eeh_recover(struct bnx2x *bp)
8178{
8179 u32 val;
8180
8181 mutex_init(&bp->port.phy_mutex);
8182
8183 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8184 bp->link_params.shmem_base = bp->common.shmem_base;
8185 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
8186
8187 if (!bp->common.shmem_base ||
8188 (bp->common.shmem_base < 0xA0000) ||
8189 (bp->common.shmem_base >= 0xC0000)) {
8190 BNX2X_DEV_INFO("MCP not active\n");
8191 bp->flags |= NO_MCP_FLAG;
8192 return;
8193 }
8194
8195 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8196 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8197 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8198 BNX2X_ERR("BAD MCP validity signature\n");
8199
8200 if (!BP_NOMCP(bp)) {
8201 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
8202 & DRV_MSG_SEQ_NUMBER_MASK);
8203 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8204 }
8205}
8206
493adb1f
WX
8207/**
8208 * bnx2x_io_error_detected - called when PCI error is detected
8209 * @pdev: Pointer to PCI device
8210 * @state: The current pci connection state
8211 *
8212 * This function is called after a PCI bus error affecting
8213 * this device has been detected.
8214 */
8215static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
8216 pci_channel_state_t state)
8217{
8218 struct net_device *dev = pci_get_drvdata(pdev);
8219 struct bnx2x *bp = netdev_priv(dev);
8220
8221 rtnl_lock();
8222
8223 netif_device_detach(dev);
8224
07ce50e4
DN
8225 if (state == pci_channel_io_perm_failure) {
8226 rtnl_unlock();
8227 return PCI_ERS_RESULT_DISCONNECT;
8228 }
8229
493adb1f 8230 if (netif_running(dev))
f8ef6e44 8231 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
8232
8233 pci_disable_device(pdev);
8234
8235 rtnl_unlock();
8236
8237 /* Request a slot reset */
8238 return PCI_ERS_RESULT_NEED_RESET;
8239}
8240
8241/**
8242 * bnx2x_io_slot_reset - called after the PCI bus has been reset
8243 * @pdev: Pointer to PCI device
8244 *
8245 * Restart the card from scratch, as if from a cold-boot.
8246 */
8247static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
8248{
8249 struct net_device *dev = pci_get_drvdata(pdev);
8250 struct bnx2x *bp = netdev_priv(dev);
8251
8252 rtnl_lock();
8253
8254 if (pci_enable_device(pdev)) {
8255 dev_err(&pdev->dev,
8256 "Cannot re-enable PCI device after reset\n");
8257 rtnl_unlock();
8258 return PCI_ERS_RESULT_DISCONNECT;
8259 }
8260
8261 pci_set_master(pdev);
8262 pci_restore_state(pdev);
8263
8264 if (netif_running(dev))
8265 bnx2x_set_power_state(bp, PCI_D0);
8266
8267 rtnl_unlock();
8268
8269 return PCI_ERS_RESULT_RECOVERED;
8270}
8271
8272/**
8273 * bnx2x_io_resume - called when traffic can start flowing again
8274 * @pdev: Pointer to PCI device
8275 *
8276 * This callback is called when the error recovery driver tells us that
8277 * its OK to resume normal operation.
8278 */
8279static void bnx2x_io_resume(struct pci_dev *pdev)
8280{
8281 struct net_device *dev = pci_get_drvdata(pdev);
8282 struct bnx2x *bp = netdev_priv(dev);
8283
72fd0718
VZ
8284 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
8285 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
8286 return;
8287 }
8288
493adb1f
WX
8289 rtnl_lock();
8290
f8ef6e44
YG
8291 bnx2x_eeh_recover(bp);
8292
493adb1f 8293 if (netif_running(dev))
f8ef6e44 8294 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
8295
8296 netif_device_attach(dev);
8297
8298 rtnl_unlock();
8299}
8300
8301static struct pci_error_handlers bnx2x_err_handler = {
8302 .error_detected = bnx2x_io_error_detected,
356e2385
EG
8303 .slot_reset = bnx2x_io_slot_reset,
8304 .resume = bnx2x_io_resume,
493adb1f
WX
8305};
8306
a2fbb9ea 8307static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
8308 .name = DRV_MODULE_NAME,
8309 .id_table = bnx2x_pci_tbl,
8310 .probe = bnx2x_init_one,
8311 .remove = __devexit_p(bnx2x_remove_one),
8312 .suspend = bnx2x_suspend,
8313 .resume = bnx2x_resume,
8314 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
8315};
8316
8317static int __init bnx2x_init(void)
8318{
dd21ca6d
SG
8319 int ret;
8320
7995c64e 8321 pr_info("%s", version);
938cf541 8322
1cf167f2
EG
8323 bnx2x_wq = create_singlethread_workqueue("bnx2x");
8324 if (bnx2x_wq == NULL) {
7995c64e 8325 pr_err("Cannot create workqueue\n");
1cf167f2
EG
8326 return -ENOMEM;
8327 }
8328
dd21ca6d
SG
8329 ret = pci_register_driver(&bnx2x_pci_driver);
8330 if (ret) {
7995c64e 8331 pr_err("Cannot register driver\n");
dd21ca6d
SG
8332 destroy_workqueue(bnx2x_wq);
8333 }
8334 return ret;
a2fbb9ea
ET
8335}
8336
8337static void __exit bnx2x_cleanup(void)
8338{
8339 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
8340
8341 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
8342}
8343
8344module_init(bnx2x_init);
8345module_exit(bnx2x_cleanup);
8346
993ac7b5
MC
8347#ifdef BCM_CNIC
8348
8349/* count denotes the number of new completions we have seen */
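/* Drain staged kwq entries onto the hardware slow-path queue as slots
 * free up, bounded by the CNIC device's max_kwqe_pending limit.
 */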
8350static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
8351{
8352 struct eth_spe *spe;
8353
8354#ifdef BNX2X_STOP_ON_ERROR
8355 if (unlikely(bp->panic))
8356 return;
8357#endif
8358
8359 spin_lock_bh(&bp->spq_lock);
8360 bp->cnic_spq_pending -= count;
8361
8362 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
8363 bp->cnic_spq_pending++) {
8364
8365 if (!bp->cnic_kwq_pending)
8366 break;
8367
8368 spe = bnx2x_sp_get_next(bp);
8369 *spe = *bp->cnic_kwq_cons;
8370
8371 bp->cnic_kwq_pending--;
8372
8373 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
8374 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
8375
8376 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
8377 bp->cnic_kwq_cons = bp->cnic_kwq;
8378 else
8379 bp->cnic_kwq_cons++;
8380 }
8381 bnx2x_sp_prod_update(bp);
8382 spin_unlock_bh(&bp->spq_lock);
8383}
8384
8385static int bnx2x_cnic_sp_queue(struct net_device *dev,
8386 struct kwqe_16 *kwqes[], u32 count)
8387{
8388 struct bnx2x *bp = netdev_priv(dev);
8389 int i;
8390
8391#ifdef BNX2X_STOP_ON_ERROR
8392 if (unlikely(bp->panic))
8393 return -EIO;
8394#endif
8395
8396 spin_lock_bh(&bp->spq_lock);
8397
8398 for (i = 0; i < count; i++) {
8399 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
8400
8401 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
8402 break;
8403
8404 *bp->cnic_kwq_prod = *spe;
8405
8406 bp->cnic_kwq_pending++;
8407
8408 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
8409 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
8410 spe->data.update_data_addr.hi,
8411 spe->data.update_data_addr.lo,
993ac7b5
MC
8412 bp->cnic_kwq_pending);
8413
8414 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
8415 bp->cnic_kwq_prod = bp->cnic_kwq;
8416 else
8417 bp->cnic_kwq_prod++;
8418 }
8419
8420 spin_unlock_bh(&bp->spq_lock);
8421
8422 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
8423 bnx2x_cnic_sp_post(bp, 0);
8424
8425 return i;
8426}
8427
8428static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8429{
8430 struct cnic_ops *c_ops;
8431 int rc = 0;
8432
8433 mutex_lock(&bp->cnic_mutex);
8434 c_ops = bp->cnic_ops;
8435 if (c_ops)
8436 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8437 mutex_unlock(&bp->cnic_mutex);
8438
8439 return rc;
8440}
8441
8442static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8443{
8444 struct cnic_ops *c_ops;
8445 int rc = 0;
8446
8447 rcu_read_lock();
8448 c_ops = rcu_dereference(bp->cnic_ops);
8449 if (c_ops)
8450 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8451 rcu_read_unlock();
8452
8453 return rc;
8454}
8455
8456/*
8457 * for commands that have no data
8458 */
9f6c9258 8459int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
8460{
8461 struct cnic_ctl_info ctl = {0};
8462
8463 ctl.cmd = cmd;
8464
8465 return bnx2x_cnic_ctl_send(bp, &ctl);
8466}
8467
8468static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
8469{
8470 struct cnic_ctl_info ctl;
8471
8472 /* first we tell CNIC and only then we count this as a completion */
8473 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
8474 ctl.data.comp.cid = cid;
8475
8476 bnx2x_cnic_ctl_send_bh(bp, &ctl);
8477 bnx2x_cnic_sp_post(bp, 1);
8478}
8479
8480static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
8481{
8482 struct bnx2x *bp = netdev_priv(dev);
8483 int rc = 0;
8484
8485 switch (ctl->cmd) {
8486 case DRV_CTL_CTXTBL_WR_CMD: {
8487 u32 index = ctl->data.io.offset;
8488 dma_addr_t addr = ctl->data.io.dma_addr;
8489
8490 bnx2x_ilt_wr(bp, index, addr);
8491 break;
8492 }
8493
8494 case DRV_CTL_COMPLETION_CMD: {
8495 int count = ctl->data.comp.comp_count;
8496
8497 bnx2x_cnic_sp_post(bp, count);
8498 break;
8499 }
8500
8501 /* rtnl_lock is held. */
8502 case DRV_CTL_START_L2_CMD: {
8503 u32 cli = ctl->data.ring.client_id;
8504
523224a3
DK
8505 /* Set iSCSI MAC address */
8506 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8507
8508 mmiowb();
8509 barrier();
8510
8511 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
8512 * because that is the only way for the UIO client to receive
8513 * them (in non-promiscuous mode only one client per
8514 * function - the leading one, in our case - will receive
8515 * multicast packets).
8516 */
8517 bnx2x_rxq_set_mac_filters(bp, cli,
8518 BNX2X_ACCEPT_UNICAST |
8519 BNX2X_ACCEPT_BROADCAST |
8520 BNX2X_ACCEPT_ALL_MULTICAST);
8521 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
8522
993ac7b5
MC
8523 break;
8524 }
8525
8526 /* rtnl_lock is held. */
8527 case DRV_CTL_STOP_L2_CMD: {
8528 u32 cli = ctl->data.ring.client_id;
8529
523224a3
DK
8530 /* Stop accepting on iSCSI L2 ring */
8531 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
8532 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
8533
8534 mmiowb();
8535 barrier();
8536
8537 /* Unset iSCSI L2 MAC */
8538 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
993ac7b5
MC
8539 break;
8540 }
8541
8542 default:
8543 BNX2X_ERR("unknown command %x\n", ctl->cmd);
8544 rc = -EINVAL;
8545 }
8546
8547 return rc;
8548}
8549
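/* Hand CNIC its interrupt resources: with MSI-X it receives the vector
 * following the driver's default one (msix_table[1]); otherwise it
 * shares the device's single INTx/MSI interrupt.
 */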
9f6c9258 8550void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
8551{
8552 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8553
8554 if (bp->flags & USING_MSIX_FLAG) {
8555 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
8556 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
8557 cp->irq_arr[0].vector = bp->msix_table[1].vector;
8558 } else {
8559 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
8560 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
8561 }
523224a3 8562 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
993ac7b5 8563 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 8564 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
993ac7b5
MC
8565 cp->irq_arr[1].status_blk = bp->def_status_blk;
8566 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 8567 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
8568
8569 cp->num_irq = 2;
8570}
8571
8572static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
8573 void *data)
8574{
8575 struct bnx2x *bp = netdev_priv(dev);
8576 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8577
8578 if (ops == NULL)
8579 return -EINVAL;
8580
8581 if (atomic_read(&bp->intr_sem) != 0)
8582 return -EBUSY;
8583
8584 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
8585 if (!bp->cnic_kwq)
8586 return -ENOMEM;
8587
8588 bp->cnic_kwq_cons = bp->cnic_kwq;
8589 bp->cnic_kwq_prod = bp->cnic_kwq;
8590 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
8591
8592 bp->cnic_spq_pending = 0;
8593 bp->cnic_kwq_pending = 0;
8594
8595 bp->cnic_data = data;
8596
8597 cp->num_irq = 0;
8598 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 8599 cp->iro_arr = bp->iro_arr;
993ac7b5 8600
523224a3
DK
8601 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
8602 BNX2X_VF_ID_INVALID, false,
8603 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
993ac7b5
MC
8604
8605 bnx2x_setup_cnic_irq_info(bp);
8606 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8607 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8608 rcu_assign_pointer(bp->cnic_ops, ops);
8609
8610 return 0;
8611}
8612
8613static int bnx2x_unregister_cnic(struct net_device *dev)
8614{
8615 struct bnx2x *bp = netdev_priv(dev);
8616 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8617
8618 mutex_lock(&bp->cnic_mutex);
8619 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8620 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8621 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8622 }
8623 cp->drv_state = 0;
8624 rcu_assign_pointer(bp->cnic_ops, NULL);
8625 mutex_unlock(&bp->cnic_mutex);
8626 synchronize_rcu();
8627 kfree(bp->cnic_kwq);
8628 bp->cnic_kwq = NULL;
8629
8630 return 0;
8631}
8632
8633struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8634{
8635 struct bnx2x *bp = netdev_priv(dev);
8636 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8637
8638 cp->drv_owner = THIS_MODULE;
8639 cp->chip_id = CHIP_ID(bp);
8640 cp->pdev = bp->pdev;
8641 cp->io_base = bp->regview;
8642 cp->io_base2 = bp->doorbells;
8643 cp->max_kwqe_pending = 8;
523224a3 8644 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
993ac7b5
MC
8645 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8646 cp->ctx_tbl_len = CNIC_ILT_LINES;
8647 cp->starting_cid = BCM_CNIC_CID_START;
8648 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8649 cp->drv_ctl = bnx2x_drv_ctl;
8650 cp->drv_register_cnic = bnx2x_register_cnic;
8651 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8652
8653 return cp;
8654}
8655EXPORT_SYMBOL(bnx2x_cnic_probe);
8656
8657#endif /* BCM_CNIC */
94a78b79 8658