bnx2x: Use correct FW constant for header padding
[net-next-2.6.git] drivers/net/bnx2x/bnx2x_main.c
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
3359fced 3 * Copyright (c) 2007-2010 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
a2fbb9ea
ET
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
0c6671b0 40#include <linux/if_vlan.h>
a2fbb9ea
ET
41#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
34f80b04 44#include <net/ip6_checksum.h>
a2fbb9ea
ET
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
34f80b04 47#include <linux/crc32c.h>
a2fbb9ea
ET
48#include <linux/prefetch.h>
49#include <linux/zlib.h>
a2fbb9ea 50#include <linux/io.h>
45229b42 51#include <linux/stringify.h>
a2fbb9ea 52
b0efbb99 53#define BNX2X_MAIN
a2fbb9ea
ET
54#include "bnx2x.h"
55#include "bnx2x_init.h"
94a78b79 56#include "bnx2x_init_ops.h"
9f6c9258 57#include "bnx2x_cmn.h"
a2fbb9ea 58
94a78b79
VZ
59#include <linux/firmware.h>
60#include "bnx2x_fw_file_hdr.h"
61/* FW files */
45229b42
BH
62#define FW_FILE_VERSION \
63 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
560131f3
DK
67#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
68#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
f2e0899f 69#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
94a78b79 70
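/* For example, if the version macros expand to 6.0.34.0 (hypothetical
 * numbers), FW_FILE_NAME_E1 becomes "bnx2x/bnx2x-e1-6.0.34.0.fw". */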
34f80b04
EG
71/* Time in jiffies before concluding the transmitter is hung */
72#define TX_TIMEOUT (5*HZ)
a2fbb9ea 73
53a10565 74static char version[] __devinitdata =
34f80b04 75 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
76 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
24e3fcef 78MODULE_AUTHOR("Eliezer Tamir");
f2e0899f
DK
79MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
a2fbb9ea
ET
81MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
45229b42
BH
83MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
f2e0899f 85MODULE_FIRMWARE(FW_FILE_NAME_E2);
a2fbb9ea 86
555f6c78
EG
87static int multi_mode = 1;
88module_param(multi_mode, int, 0);
ca00392c
EG
89MODULE_PARM_DESC(multi_mode, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
91
d6214d7a 92int num_queues;
54b9ddaa
VZ
93module_param(num_queues, int, 0);
94MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
555f6c78 96
19680c48 97static int disable_tpa;
19680c48 98module_param(disable_tpa, int, 0);
9898f86d 99MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
100
101static int int_mode;
102module_param(int_mode, int, 0);
cdaa7cb8
VZ
 103MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
104 "(1 INT#x; 2 MSI)");
8badd27a 105
a18f5128
EG
106static int dropless_fc;
107module_param(dropless_fc, int, 0);
108MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
109
9898f86d 110static int poll;
a2fbb9ea 111module_param(poll, int, 0);
9898f86d 112MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
113
114static int mrrs = -1;
115module_param(mrrs, int, 0);
116MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
117
9898f86d 118static int debug;
a2fbb9ea 119module_param(debug, int, 0);
9898f86d
EG
120MODULE_PARM_DESC(debug, " Default debug msglevel");
121
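/* Usage example (illustrative): all of the parameters above are set at
 * module load time, e.g. "modprobe bnx2x multi_mode=1 num_queues=4". */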
1cf167f2 122static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
123
124enum bnx2x_board_type {
125 BCM57710 = 0,
34f80b04
EG
126 BCM57711 = 1,
127 BCM57711E = 2,
f2e0899f
DK
128 BCM57712 = 3,
129 BCM57712E = 4
a2fbb9ea
ET
130};
131
34f80b04 132/* indexed by board_type, above */
53a10565 133static struct {
a2fbb9ea
ET
134 char *name;
135} board_info[] __devinitdata = {
34f80b04
EG
136 { "Broadcom NetXtreme II BCM57710 XGb" },
137 { "Broadcom NetXtreme II BCM57711 XGb" },
f2e0899f
DK
138 { "Broadcom NetXtreme II BCM57711E XGb" },
139 { "Broadcom NetXtreme II BCM57712 XGb" },
140 { "Broadcom NetXtreme II BCM57712E XGb" }
a2fbb9ea
ET
141};
142
f2e0899f
DK
143#ifndef PCI_DEVICE_ID_NX2_57712
144#define PCI_DEVICE_ID_NX2_57712 0x1662
145#endif
146#ifndef PCI_DEVICE_ID_NX2_57712E
147#define PCI_DEVICE_ID_NX2_57712E 0x1663
148#endif
34f80b04 149
a3aa1884 150static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
e4ed7113
EG
151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
f2e0899f
DK
154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
a2fbb9ea
ET
156 { 0 }
157};
158
159MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
160
161/****************************************************************************
162* General service functions
163****************************************************************************/
164
523224a3
DK
165static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
167{
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
170}
171
172static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
174{
175 int i;
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
178}
179
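/* Note: the fill helper above (and __storm_memset_struct used below) takes
 * a size in bytes and issues size/4 dword writes, so callers pass the
 * sizeof() of dword-aligned firmware structures. */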
180static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
182{
183 size_t size = sizeof(struct ustorm_per_client_stats);
184
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
187
188 __storm_memset_fill(bp, addr, size, 0);
189}
190
191static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
193{
194 size_t size = sizeof(struct tstorm_per_client_stats);
195
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
198
199 __storm_memset_fill(bp, addr, size, 0);
200}
201
202static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
204{
205 size_t size = sizeof(struct xstorm_per_client_stats);
206
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
209
210 __storm_memset_fill(bp, addr, size, 0);
211}
212
213
214static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
216{
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
219
220 __storm_memset_dma_mapping(bp, addr, mapping);
221}
222
223static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
224{
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
226}
227
228static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
230 u16 abs_fid)
231{
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
233
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
236
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
238}
239
240static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
242 u16 abs_fid)
243{
244 size_t size = sizeof(struct stats_indication_flags);
245
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
247
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249}
250
251static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
253 u16 abs_fid)
254{
255 size_t size = sizeof(struct stats_indication_flags);
256
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
258
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260}
261
262static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
264 u16 abs_fid)
265{
266 size_t size = sizeof(struct stats_indication_flags);
267
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
269
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271}
272
273static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
275 u16 abs_fid)
276{
277 size_t size = sizeof(struct stats_indication_flags);
278
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
280
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
282}
283
284static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
286{
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
289
290 __storm_memset_dma_mapping(bp, addr, mapping);
291}
292
293static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
295{
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
298
299 __storm_memset_dma_mapping(bp, addr, mapping);
300}
301
302static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
304{
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
307
308 __storm_memset_dma_mapping(bp, addr, mapping);
309}
310
311static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
313{
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
316
317 __storm_memset_dma_mapping(bp, addr, mapping);
318}
319
320static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
321 u16 pf_id)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
330 pf_id);
331}
332
333static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
334 u8 enable)
335{
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
343 enable);
344}
345
346static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data,
348 u16 pfid)
349{
350 size_t size = sizeof(struct event_ring_data);
351
352 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
353
354 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
355}
356
357static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
358 u16 pfid)
359{
360 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
361 REG_WR16(bp, addr, eq_prod);
362}
363
364static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
365 u16 fw_sb_id, u8 sb_index,
366 u8 ticks)
367{
368
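	/* the write below lands on index_data[sb_index].timeout inside the
	 * CSTORM-resident status block data of fw_sb_id */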
f2e0899f
DK
369 int index_offset = CHIP_IS_E2(bp) ?
370 offsetof(struct hc_status_block_data_e2, index_data) :
523224a3
DK
371 offsetof(struct hc_status_block_data_e1x, index_data);
372 u32 addr = BAR_CSTRORM_INTMEM +
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
374 index_offset +
375 sizeof(struct hc_index_data)*sb_index +
376 offsetof(struct hc_index_data, timeout);
377 REG_WR8(bp, addr, ticks);
378 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port, fw_sb_id, sb_index, ticks);
380}
381static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
382 u16 fw_sb_id, u8 sb_index,
383 u8 disable)
384{
385 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
f2e0899f
DK
386 int index_offset = CHIP_IS_E2(bp) ?
387 offsetof(struct hc_status_block_data_e2, index_data) :
523224a3
DK
388 offsetof(struct hc_status_block_data_e1x, index_data);
389 u32 addr = BAR_CSTRORM_INTMEM +
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
391 index_offset +
392 sizeof(struct hc_index_data)*sb_index +
393 offsetof(struct hc_index_data, flags);
394 u16 flags = REG_RD16(bp, addr);
395 /* clear and set */
396 flags &= ~HC_INDEX_DATA_HC_ENABLED;
397 flags |= enable_flag;
398 REG_WR16(bp, addr, flags);
399 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port, fw_sb_id, sb_index, disable);
401}
402
a2fbb9ea
ET
403/* used only at init
404 * locking is done by mcp
405 */
573f2035 406void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
407{
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
410 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
411 PCICFG_VENDOR_ID_OFFSET);
412}
413
a2fbb9ea
ET
414static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
415{
416 u32 val;
417
418 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
419 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
420 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
421 PCICFG_VENDOR_ID_OFFSET);
422
423 return val;
424}
a2fbb9ea 425
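/* Illustrative sketch (not part of the original file): a read-modify-write
 * through the indirect PCI config window above; "reg" is assumed to be a
 * valid GRC address and the MCP locking rules noted above to be honoured.
 */
static inline void example_reg_ind_set_bits(struct bnx2x *bp, u32 reg,
					    u32 bits)
{
	u32 val = bnx2x_reg_rd_ind(bp, reg);

	bnx2x_reg_wr_ind(bp, reg, val | bits);
}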
f2e0899f
DK
426#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430#define DMAE_DP_DST_NONE "dst_addr [none]"
431
432void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
433{
434 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
435
436 switch (dmae->opcode & DMAE_COMMAND_DST) {
437 case DMAE_CMD_DST_PCI:
438 if (src_type == DMAE_CMD_SRC_PCI)
439 DP(msglvl, "DMAE: opcode 0x%08x\n"
440 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
441 "comp_addr [%x:%08x], comp_val 0x%08x\n",
442 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
443 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
444 dmae->comp_addr_hi, dmae->comp_addr_lo,
445 dmae->comp_val);
446 else
447 DP(msglvl, "DMAE: opcode 0x%08x\n"
448 "src [%08x], len [%d*4], dst [%x:%08x]\n"
449 "comp_addr [%x:%08x], comp_val 0x%08x\n",
450 dmae->opcode, dmae->src_addr_lo >> 2,
451 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
452 dmae->comp_addr_hi, dmae->comp_addr_lo,
453 dmae->comp_val);
454 break;
455 case DMAE_CMD_DST_GRC:
456 if (src_type == DMAE_CMD_SRC_PCI)
457 DP(msglvl, "DMAE: opcode 0x%08x\n"
458 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
459 "comp_addr [%x:%08x], comp_val 0x%08x\n",
460 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
461 dmae->len, dmae->dst_addr_lo >> 2,
462 dmae->comp_addr_hi, dmae->comp_addr_lo,
463 dmae->comp_val);
464 else
465 DP(msglvl, "DMAE: opcode 0x%08x\n"
466 "src [%08x], len [%d*4], dst [%08x]\n"
467 "comp_addr [%x:%08x], comp_val 0x%08x\n",
468 dmae->opcode, dmae->src_addr_lo >> 2,
469 dmae->len, dmae->dst_addr_lo >> 2,
470 dmae->comp_addr_hi, dmae->comp_addr_lo,
471 dmae->comp_val);
472 break;
473 default:
474 if (src_type == DMAE_CMD_SRC_PCI)
475 DP(msglvl, "DMAE: opcode 0x%08x\n"
476 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
477 "dst_addr [none]\n"
478 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
479 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
480 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
481 dmae->comp_val);
482 else
483 DP(msglvl, "DMAE: opcode 0x%08x\n"
484 DP_LEVEL "src_addr [%08x] len [%d * 4] "
485 "dst_addr [none]\n"
486 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
487 dmae->opcode, dmae->src_addr_lo >> 2,
488 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
489 dmae->comp_val);
490 break;
491 }
492
493}
494
6c719d00 495const u32 dmae_reg_go_c[] = {
a2fbb9ea
ET
496 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
497 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
498 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
499 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
500};
501
502/* copy command into DMAE command memory and set DMAE command go */
6c719d00 503void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
a2fbb9ea
ET
504{
505 u32 cmd_offset;
506 int i;
507
508 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
509 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
510 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
511
ad8d3948
EG
512 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
513 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
514 }
515 REG_WR(bp, dmae_reg_go_c[idx], 1);
516}
517
f2e0899f 518u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
a2fbb9ea 519{
f2e0899f
DK
520 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
521 DMAE_CMD_C_ENABLE);
522}
ad8d3948 523
f2e0899f
DK
524u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
525{
526 return opcode & ~DMAE_CMD_SRC_RESET;
527}
ad8d3948 528
f2e0899f
DK
529u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
530 bool with_comp, u8 comp_type)
531{
532 u32 opcode = 0;
533
534 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
535 (dst_type << DMAE_COMMAND_DST_SHIFT));
ad8d3948 536
f2e0899f
DK
537 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
538
539 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
540 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
541 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
542 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
a2fbb9ea 543
a2fbb9ea 544#ifdef __BIG_ENDIAN
f2e0899f 545 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
a2fbb9ea 546#else
f2e0899f 547 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
a2fbb9ea 548#endif
f2e0899f
DK
549 if (with_comp)
550 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
551 return opcode;
552}
553
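/* Illustrative sketch (not part of the original file): composing an opcode
 * for a PCI -> GRC transfer whose completion is written back to host
 * memory, which is what bnx2x_prep_dmae_with_comp() below does internally.
 */
static inline u32 example_dmae_pci_to_grc_opcode(struct bnx2x *bp)
{
	return bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				 true /* with_comp */, DMAE_COMP_PCI);
}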
554void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
555 u8 src_type, u8 dst_type)
556{
557 memset(dmae, 0, sizeof(struct dmae_command));
558
559 /* set the opcode */
560 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
561 true, DMAE_COMP_PCI);
562
563 /* fill in the completion parameters */
564 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
565 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
566 dmae->comp_val = DMAE_COMP_VAL;
567}
568
 569/* issue a DMAE command over the init channel and wait for completion */
570int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
571{
572 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
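	/* completion poll budget: 40 iterations of udelay(50) (~2ms) on real
	 * silicon, stretched to 400000 iterations (~20s) on emulation */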
573 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
574 int rc = 0;
575
576 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 579
f2e0899f 580 /* lock the dmae channel */
5ff7b6d4
EG
581 mutex_lock(&bp->dmae_mutex);
582
f2e0899f 583 /* reset completion */
a2fbb9ea
ET
584 *wb_comp = 0;
585
f2e0899f
DK
586 /* post the command on the channel used for initializations */
587 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea 588
f2e0899f 589 /* wait for completion */
a2fbb9ea 590 udelay(5);
f2e0899f 591 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
ad8d3948
EG
592 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
593
ad8d3948 594 if (!cnt) {
c3eefaf6 595 BNX2X_ERR("DMAE timeout!\n");
f2e0899f
DK
596 rc = DMAE_TIMEOUT;
597 goto unlock;
a2fbb9ea 598 }
ad8d3948 599 cnt--;
f2e0899f 600 udelay(50);
a2fbb9ea 601 }
f2e0899f
DK
602 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
603 BNX2X_ERR("DMAE PCI error!\n");
604 rc = DMAE_PCI_ERROR;
605 }
606
607 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
608 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
609 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948 610
f2e0899f 611unlock:
ad8d3948 612 mutex_unlock(&bp->dmae_mutex);
f2e0899f
DK
613 return rc;
614}
615
616void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
617 u32 len32)
618{
619 struct dmae_command dmae;
620
621 if (!bp->dmae_ready) {
622 u32 *data = bnx2x_sp(bp, wb_data[0]);
623
624 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
625 " using indirect\n", dst_addr, len32);
626 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
627 return;
628 }
629
630 /* set opcode and fixed command fields */
631 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
632
633 /* fill in addresses and len */
634 dmae.src_addr_lo = U64_LO(dma_addr);
635 dmae.src_addr_hi = U64_HI(dma_addr);
636 dmae.dst_addr_lo = dst_addr >> 2;
637 dmae.dst_addr_hi = 0;
638 dmae.len = len32;
639
640 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
641
642 /* issue the command and wait for completion */
643 bnx2x_issue_dmae_with_comp(bp, &dmae);
a2fbb9ea
ET
644}
645
c18487ee 646void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 647{
5ff7b6d4 648 struct dmae_command dmae;
ad8d3948
EG
649
650 if (!bp->dmae_ready) {
651 u32 *data = bnx2x_sp(bp, wb_data[0]);
652 int i;
653
654 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
655 " using indirect\n", src_addr, len32);
656 for (i = 0; i < len32; i++)
657 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
658 return;
659 }
660
f2e0899f
DK
661 /* set opcode and fixed command fields */
662 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
a2fbb9ea 663
f2e0899f 664 /* fill in addresses and len */
5ff7b6d4
EG
665 dmae.src_addr_lo = src_addr >> 2;
666 dmae.src_addr_hi = 0;
667 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
668 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
669 dmae.len = len32;
ad8d3948 670
f2e0899f 671 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
ad8d3948 672
f2e0899f
DK
673 /* issue the command and wait for completion */
674 bnx2x_issue_dmae_with_comp(bp, &dmae);
ad8d3948
EG
675}
676
573f2035
EG
677void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
678 u32 addr, u32 len)
679{
02e3c6cb 680 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
573f2035
EG
681 int offset = 0;
682
02e3c6cb 683 while (len > dmae_wr_max) {
573f2035 684 bnx2x_write_dmae(bp, phys_addr + offset,
02e3c6cb
VZ
685 addr + offset, dmae_wr_max);
686 offset += dmae_wr_max * 4;
687 len -= dmae_wr_max;
573f2035
EG
688 }
689
690 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
691}
692
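/* Worked example of the chunking above (assuming, hypothetically, that
 * DMAE_LEN32_WR_MAX(bp) == 0x400): a write of len == 0x500 dwords is issued
 * as 0x400 dwords at addr and then 0x100 dwords at addr + 0x1000, since
 * offset advances in bytes (0x400 dwords * 4). */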
ad8d3948
EG
693/* used only for slowpath so not inlined */
694static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
695{
696 u32 wb_write[2];
697
698 wb_write[0] = val_hi;
699 wb_write[1] = val_lo;
700 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 701}
a2fbb9ea 702
ad8d3948
EG
703#ifdef USE_WB_RD
704static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
705{
706 u32 wb_data[2];
707
708 REG_RD_DMAE(bp, reg, wb_data, 2);
709
710 return HILO_U64(wb_data[0], wb_data[1]);
711}
712#endif
713
a2fbb9ea
ET
714static int bnx2x_mc_assert(struct bnx2x *bp)
715{
a2fbb9ea 716 char last_idx;
34f80b04
EG
717 int i, rc = 0;
718 u32 row0, row1, row2, row3;
719
720 /* XSTORM */
721 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
722 XSTORM_ASSERT_LIST_INDEX_OFFSET);
723 if (last_idx)
724 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
725
726 /* print the asserts */
727 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
728
729 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
730 XSTORM_ASSERT_LIST_OFFSET(i));
731 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
732 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
733 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
734 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
735 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
736 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
737
738 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
739 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
740 " 0x%08x 0x%08x 0x%08x\n",
741 i, row3, row2, row1, row0);
742 rc++;
743 } else {
744 break;
745 }
746 }
747
748 /* TSTORM */
749 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
750 TSTORM_ASSERT_LIST_INDEX_OFFSET);
751 if (last_idx)
752 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
753
754 /* print the asserts */
755 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
756
757 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
758 TSTORM_ASSERT_LIST_OFFSET(i));
759 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
760 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
761 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
762 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
763 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
764 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
765
766 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
767 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
768 " 0x%08x 0x%08x 0x%08x\n",
769 i, row3, row2, row1, row0);
770 rc++;
771 } else {
772 break;
773 }
774 }
775
776 /* CSTORM */
777 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
778 CSTORM_ASSERT_LIST_INDEX_OFFSET);
779 if (last_idx)
780 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
781
782 /* print the asserts */
783 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
784
785 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
786 CSTORM_ASSERT_LIST_OFFSET(i));
787 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
788 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
789 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
790 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
791 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
792 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
793
794 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
795 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
796 " 0x%08x 0x%08x 0x%08x\n",
797 i, row3, row2, row1, row0);
798 rc++;
799 } else {
800 break;
801 }
802 }
803
804 /* USTORM */
805 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
806 USTORM_ASSERT_LIST_INDEX_OFFSET);
807 if (last_idx)
808 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
809
810 /* print the asserts */
811 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
812
813 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
814 USTORM_ASSERT_LIST_OFFSET(i));
815 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
816 USTORM_ASSERT_LIST_OFFSET(i) + 4);
817 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
818 USTORM_ASSERT_LIST_OFFSET(i) + 8);
819 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
820 USTORM_ASSERT_LIST_OFFSET(i) + 12);
821
822 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
823 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
824 " 0x%08x 0x%08x 0x%08x\n",
825 i, row3, row2, row1, row0);
826 rc++;
827 } else {
828 break;
a2fbb9ea
ET
829 }
830 }
34f80b04 831
a2fbb9ea
ET
832 return rc;
833}
c14423fe 834
a2fbb9ea
ET
835static void bnx2x_fw_dump(struct bnx2x *bp)
836{
cdaa7cb8 837 u32 addr;
a2fbb9ea 838 u32 mark, offset;
4781bfad 839 __be32 data[9];
a2fbb9ea 840 int word;
f2e0899f 841 u32 trace_shmem_base;
2145a920
VZ
842 if (BP_NOMCP(bp)) {
843 BNX2X_ERR("NO MCP - can not dump\n");
844 return;
845 }
cdaa7cb8 846
f2e0899f
DK
847 if (BP_PATH(bp) == 0)
848 trace_shmem_base = bp->common.shmem_base;
849 else
850 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
851 addr = trace_shmem_base - 0x0800 + 4;
cdaa7cb8 852 mark = REG_RD(bp, addr);
f2e0899f
DK
853 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
854 + ((mark + 0x3) & ~0x3) - 0x08000000;
7995c64e 855 pr_err("begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 856
7995c64e 857 pr_err("");
f2e0899f 858 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
a2fbb9ea 859 for (word = 0; word < 8; word++)
cdaa7cb8 860 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 861 data[8] = 0x0;
7995c64e 862 pr_cont("%s", (char *)data);
a2fbb9ea 863 }
cdaa7cb8 864 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
a2fbb9ea 865 for (word = 0; word < 8; word++)
cdaa7cb8 866 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 867 data[8] = 0x0;
7995c64e 868 pr_cont("%s", (char *)data);
a2fbb9ea 869 }
7995c64e 870 pr_err("end of fw dump\n");
a2fbb9ea
ET
871}
872
6c719d00 873void bnx2x_panic_dump(struct bnx2x *bp)
a2fbb9ea
ET
874{
875 int i;
523224a3
DK
876 u16 j;
877 struct hc_sp_status_block_data sp_sb_data;
878 int func = BP_FUNC(bp);
879#ifdef BNX2X_STOP_ON_ERROR
880 u16 start = 0, end = 0;
881#endif
a2fbb9ea 882
66e855f3
YG
883 bp->stats_state = STATS_STATE_DISABLED;
884 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
885
a2fbb9ea
ET
886 BNX2X_ERR("begin crash dump -----------------\n");
887
8440d2b6
EG
888 /* Indices */
889 /* Common */
523224a3 890 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
cdaa7cb8 891 " spq_prod_idx(0x%x)\n",
523224a3
DK
892 bp->def_idx, bp->def_att_idx,
893 bp->attn_state, bp->spq_prod_idx);
894 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
895 bp->def_status_blk->atten_status_block.attn_bits,
896 bp->def_status_blk->atten_status_block.attn_bits_ack,
897 bp->def_status_blk->atten_status_block.status_block_id,
898 bp->def_status_blk->atten_status_block.attn_bits_index);
899 BNX2X_ERR(" def (");
900 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
901 pr_cont("0x%x%s",
902 bp->def_status_blk->sp_sb.index_values[i],
903 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
904
905 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
906 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
907 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
908 i*sizeof(u32));
909
910 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
911 "pf_id(0x%x) vnic_id(0x%x) "
912 "vf_id(0x%x) vf_valid (0x%x)\n",
913 sp_sb_data.igu_sb_id,
914 sp_sb_data.igu_seg_id,
915 sp_sb_data.p_func.pf_id,
916 sp_sb_data.p_func.vnic_id,
917 sp_sb_data.p_func.vf_id,
918 sp_sb_data.p_func.vf_valid);
919
8440d2b6 920
54b9ddaa 921 for_each_queue(bp, i) {
a2fbb9ea 922 struct bnx2x_fastpath *fp = &bp->fp[i];
523224a3 923 int loop;
f2e0899f 924 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
925 struct hc_status_block_data_e1x sb_data_e1x;
926 struct hc_status_block_sm *hc_sm_p =
f2e0899f
DK
927 CHIP_IS_E2(bp) ?
928 sb_data_e2.common.state_machine :
523224a3
DK
929 sb_data_e1x.common.state_machine;
930 struct hc_index_data *hc_index_p =
f2e0899f
DK
931 CHIP_IS_E2(bp) ?
932 sb_data_e2.index_data :
523224a3
DK
933 sb_data_e1x.index_data;
934 int data_size;
935 u32 *sb_data_p;
936
937 /* Rx */
cdaa7cb8 938 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
523224a3 939 " rx_comp_prod(0x%x)"
cdaa7cb8 940 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
8440d2b6 941 i, fp->rx_bd_prod, fp->rx_bd_cons,
523224a3 942 fp->rx_comp_prod,
66e855f3 943 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
cdaa7cb8 944 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
523224a3 945 " fp_hc_idx(0x%x)\n",
8440d2b6 946 fp->rx_sge_prod, fp->last_max_sge,
523224a3 947 le16_to_cpu(fp->fp_hc_idx));
a2fbb9ea 948
523224a3 949 /* Tx */
cdaa7cb8
VZ
950 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
951 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
952 " *tx_cons_sb(0x%x)\n",
8440d2b6
EG
953 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
954 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
523224a3 955
f2e0899f
DK
956 loop = CHIP_IS_E2(bp) ?
957 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
523224a3
DK
958
959 /* host sb data */
960
961 BNX2X_ERR(" run indexes (");
962 for (j = 0; j < HC_SB_MAX_SM; j++)
963 pr_cont("0x%x%s",
964 fp->sb_running_index[j],
965 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
966
967 BNX2X_ERR(" indexes (");
968 for (j = 0; j < loop; j++)
969 pr_cont("0x%x%s",
970 fp->sb_index_values[j],
971 (j == loop - 1) ? ")" : " ");
972 /* fw sb data */
f2e0899f
DK
973 data_size = CHIP_IS_E2(bp) ?
974 sizeof(struct hc_status_block_data_e2) :
523224a3
DK
975 sizeof(struct hc_status_block_data_e1x);
976 data_size /= sizeof(u32);
f2e0899f
DK
977 sb_data_p = CHIP_IS_E2(bp) ?
978 (u32 *)&sb_data_e2 :
979 (u32 *)&sb_data_e1x;
523224a3
DK
980 /* copy sb data in here */
981 for (j = 0; j < data_size; j++)
982 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
983 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
984 j * sizeof(u32));
985
f2e0899f
DK
986 if (CHIP_IS_E2(bp)) {
987 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
988 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
989 sb_data_e2.common.p_func.pf_id,
990 sb_data_e2.common.p_func.vf_id,
991 sb_data_e2.common.p_func.vf_valid,
992 sb_data_e2.common.p_func.vnic_id,
993 sb_data_e2.common.same_igu_sb_1b);
994 } else {
995 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
996 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
997 sb_data_e1x.common.p_func.pf_id,
998 sb_data_e1x.common.p_func.vf_id,
999 sb_data_e1x.common.p_func.vf_valid,
1000 sb_data_e1x.common.p_func.vnic_id,
1001 sb_data_e1x.common.same_igu_sb_1b);
1002 }
523224a3
DK
1003
1004 /* SB_SMs data */
1005 for (j = 0; j < HC_SB_MAX_SM; j++) {
1006 pr_cont("SM[%d] __flags (0x%x) "
1007 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1008 "time_to_expire (0x%x) "
1009 "timer_value(0x%x)\n", j,
1010 hc_sm_p[j].__flags,
1011 hc_sm_p[j].igu_sb_id,
1012 hc_sm_p[j].igu_seg_id,
1013 hc_sm_p[j].time_to_expire,
1014 hc_sm_p[j].timer_value);
1015 }
1016
 1017 /* Indices data */
1018 for (j = 0; j < loop; j++) {
1019 pr_cont("INDEX[%d] flags (0x%x) "
1020 "timeout (0x%x)\n", j,
1021 hc_index_p[j].flags,
1022 hc_index_p[j].timeout);
1023 }
8440d2b6 1024 }
a2fbb9ea 1025
523224a3 1026#ifdef BNX2X_STOP_ON_ERROR
8440d2b6
EG
1027 /* Rings */
1028 /* Rx */
54b9ddaa 1029 for_each_queue(bp, i) {
8440d2b6 1030 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
1031
1032 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1033 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 1034 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
1035 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1036 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1037
c3eefaf6
EG
1038 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1039 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
1040 }
1041
3196a88a
EG
1042 start = RX_SGE(fp->rx_sge_prod);
1043 end = RX_SGE(fp->last_max_sge);
8440d2b6 1044 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
1045 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1046 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1047
c3eefaf6
EG
1048 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1049 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
1050 }
1051
a2fbb9ea
ET
1052 start = RCQ_BD(fp->rx_comp_cons - 10);
1053 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 1054 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
1055 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1056
c3eefaf6
EG
1057 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1058 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
1059 }
1060 }
1061
8440d2b6 1062 /* Tx */
54b9ddaa 1063 for_each_queue(bp, i) {
8440d2b6
EG
1064 struct bnx2x_fastpath *fp = &bp->fp[i];
1065
1066 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
1067 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
1068 for (j = start; j != end; j = TX_BD(j + 1)) {
1069 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
1070
c3eefaf6
EG
1071 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
1072 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
1073 }
1074
1075 start = TX_BD(fp->tx_bd_cons - 10);
1076 end = TX_BD(fp->tx_bd_cons + 254);
1077 for (j = start; j != end; j = TX_BD(j + 1)) {
1078 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
1079
c3eefaf6
EG
1080 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
1081 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
1082 }
1083 }
523224a3 1084#endif
34f80b04 1085 bnx2x_fw_dump(bp);
a2fbb9ea
ET
1086 bnx2x_mc_assert(bp);
1087 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
1088}
1089
f2e0899f 1090static void bnx2x_hc_int_enable(struct bnx2x *bp)
a2fbb9ea 1091{
34f80b04 1092 int port = BP_PORT(bp);
a2fbb9ea
ET
1093 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1094 u32 val = REG_RD(bp, addr);
1095 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 1096 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
1097
1098 if (msix) {
8badd27a
EG
1099 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1100 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
1101 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1102 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
1103 } else if (msi) {
1104 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1105 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1106 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1107 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
1108 } else {
1109 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 1110 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
1111 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1112 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 1113
8badd27a
EG
1114 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1115 val, port, addr);
615f8fd9
ET
1116
1117 REG_WR(bp, addr, val);
1118
a2fbb9ea
ET
1119 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1120 }
1121
8badd27a
EG
1122 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
1123 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
1124
1125 REG_WR(bp, addr, val);
37dbbf32
EG
1126 /*
1127 * Ensure that HC_CONFIG is written before leading/trailing edge config
1128 */
1129 mmiowb();
1130 barrier();
34f80b04 1131
f2e0899f 1132 if (!CHIP_IS_E1(bp)) {
34f80b04 1133 /* init leading/trailing edge */
fb3bff17 1134 if (IS_MF(bp)) {
8badd27a 1135 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 1136 if (bp->port.pmf)
4acac6a5
EG
1137 /* enable nig and gpio3 attention */
1138 val |= 0x1100;
34f80b04
EG
1139 } else
1140 val = 0xffff;
1141
1142 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1143 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1144 }
37dbbf32
EG
1145
1146 /* Make sure that interrupts are indeed enabled from here on */
1147 mmiowb();
a2fbb9ea
ET
1148}
1149
f2e0899f
DK
1150static void bnx2x_igu_int_enable(struct bnx2x *bp)
1151{
1152 u32 val;
1153 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1154 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1155
1156 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1157
1158 if (msix) {
1159 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1160 IGU_PF_CONF_SINGLE_ISR_EN);
1161 val |= (IGU_PF_CONF_FUNC_EN |
1162 IGU_PF_CONF_MSI_MSIX_EN |
1163 IGU_PF_CONF_ATTN_BIT_EN);
1164 } else if (msi) {
1165 val &= ~IGU_PF_CONF_INT_LINE_EN;
1166 val |= (IGU_PF_CONF_FUNC_EN |
1167 IGU_PF_CONF_MSI_MSIX_EN |
1168 IGU_PF_CONF_ATTN_BIT_EN |
1169 IGU_PF_CONF_SINGLE_ISR_EN);
1170 } else {
1171 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1172 val |= (IGU_PF_CONF_FUNC_EN |
1173 IGU_PF_CONF_INT_LINE_EN |
1174 IGU_PF_CONF_ATTN_BIT_EN |
1175 IGU_PF_CONF_SINGLE_ISR_EN);
1176 }
1177
1178 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1179 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1180
1181 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1182
1183 barrier();
1184
1185 /* init leading/trailing edge */
1186 if (IS_MF(bp)) {
1187 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1188 if (bp->port.pmf)
1189 /* enable nig and gpio3 attention */
1190 val |= 0x1100;
1191 } else
1192 val = 0xffff;
1193
1194 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1195 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1196
1197 /* Make sure that interrupts are indeed enabled from here on */
1198 mmiowb();
1199}
1200
1201void bnx2x_int_enable(struct bnx2x *bp)
1202{
1203 if (bp->common.int_block == INT_BLOCK_HC)
1204 bnx2x_hc_int_enable(bp);
1205 else
1206 bnx2x_igu_int_enable(bp);
1207}
1208
1209static void bnx2x_hc_int_disable(struct bnx2x *bp)
a2fbb9ea 1210{
34f80b04 1211 int port = BP_PORT(bp);
a2fbb9ea
ET
1212 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1213 u32 val = REG_RD(bp, addr);
1214
1215 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1216 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1217 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1218 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1219
1220 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1221 val, port, addr);
1222
8badd27a
EG
1223 /* flush all outstanding writes */
1224 mmiowb();
1225
a2fbb9ea
ET
1226 REG_WR(bp, addr, val);
1227 if (REG_RD(bp, addr) != val)
1228 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1229}
1230
f2e0899f
DK
1231static void bnx2x_igu_int_disable(struct bnx2x *bp)
1232{
1233 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1234
1235 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1236 IGU_PF_CONF_INT_LINE_EN |
1237 IGU_PF_CONF_ATTN_BIT_EN);
1238
1239 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1240
1241 /* flush all outstanding writes */
1242 mmiowb();
1243
1244 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1245 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1246 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1247}
1248
1249void bnx2x_int_disable(struct bnx2x *bp)
1250{
1251 if (bp->common.int_block == INT_BLOCK_HC)
1252 bnx2x_hc_int_disable(bp);
1253 else
1254 bnx2x_igu_int_disable(bp);
1255}
1256
9f6c9258 1257void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 1258{
a2fbb9ea 1259 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 1260 int i, offset;
a2fbb9ea 1261
34f80b04 1262 /* disable interrupt handling */
a2fbb9ea 1263 atomic_inc(&bp->intr_sem);
e1510706
EG
1264 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1265
f8ef6e44
YG
1266 if (disable_hw)
1267 /* prevent the HW from sending interrupts */
1268 bnx2x_int_disable(bp);
a2fbb9ea
ET
1269
1270 /* make sure all ISRs are done */
1271 if (msix) {
8badd27a
EG
1272 synchronize_irq(bp->msix_table[0].vector);
1273 offset = 1;
37b091ba
MC
1274#ifdef BCM_CNIC
1275 offset++;
1276#endif
a2fbb9ea 1277 for_each_queue(bp, i)
8badd27a 1278 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
1279 } else
1280 synchronize_irq(bp->pdev->irq);
1281
1282 /* make sure sp_task is not running */
1cf167f2
EG
1283 cancel_delayed_work(&bp->sp_task);
1284 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
1285}
1286
34f80b04 1287/* fast path */
a2fbb9ea
ET
1288
1289/*
34f80b04 1290 * General service functions
a2fbb9ea
ET
1291 */
1292
72fd0718
VZ
1293/* Return true if succeeded to acquire the lock */
1294static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1295{
1296 u32 lock_status;
1297 u32 resource_bit = (1 << resource);
1298 int func = BP_FUNC(bp);
1299 u32 hw_lock_control_reg;
1300
1301 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1302
1303 /* Validating that the resource is within range */
1304 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1305 DP(NETIF_MSG_HW,
1306 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1307 resource, HW_LOCK_MAX_RESOURCE_VALUE);
0fdf4d09 1308 return false;
72fd0718
VZ
1309 }
1310
1311 if (func <= 5)
1312 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1313 else
1314 hw_lock_control_reg =
1315 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1316
1317 /* Try to acquire the lock */
1318 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1319 lock_status = REG_RD(bp, hw_lock_control_reg);
1320 if (lock_status & resource_bit)
1321 return true;
1322
1323 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1324 return false;
1325}
1326
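/* Note: unlike bnx2x_acquire_hw_lock() below, which polls for up to 5s,
 * the trylock above makes a single attempt and reports failure, so its
 * callers must be prepared to back off and retry. */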
993ac7b5
MC
1327#ifdef BCM_CNIC
1328static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1329#endif
3196a88a 1330
9f6c9258 1331void bnx2x_sp_event(struct bnx2x_fastpath *fp,
a2fbb9ea
ET
1332 union eth_rx_cqe *rr_cqe)
1333{
1334 struct bnx2x *bp = fp->bp;
1335 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1336 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1337
34f80b04 1338 DP(BNX2X_MSG_SP,
a2fbb9ea 1339 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 1340 fp->index, cid, command, bp->state,
34f80b04 1341 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea 1342
523224a3
DK
1343 switch (command | fp->state) {
1344 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1345 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1346 fp->state = BNX2X_FP_STATE_OPEN;
a2fbb9ea
ET
1347 break;
1348
523224a3
DK
1349 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1350 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
a2fbb9ea
ET
1351 fp->state = BNX2X_FP_STATE_HALTED;
1352 break;
1353
523224a3
DK
1354 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
 1355 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
1356 fp->state = BNX2X_FP_STATE_TERMINATED;
a2fbb9ea
ET
1357 break;
1358
523224a3
DK
1359 default:
1360 BNX2X_ERR("unexpected MC reply (%d) "
1361 "fp[%d] state is %x\n",
1362 command, fp->index, fp->state);
993ac7b5 1363 break;
523224a3 1364 }
3196a88a 1365
8fe23fbd
DK
1366 smp_mb__before_atomic_inc();
1367 atomic_inc(&bp->spq_left);
523224a3
DK
 1368 /* push the change in fp->state out to memory */
1369 smp_wmb();
49d66772 1370
523224a3 1371 return;
a2fbb9ea
ET
1372}
1373
9f6c9258 1374irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
a2fbb9ea 1375{
555f6c78 1376 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1377 u16 status = bnx2x_ack_int(bp);
34f80b04 1378 u16 mask;
ca00392c 1379 int i;
a2fbb9ea 1380
34f80b04 1381 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1382 if (unlikely(status == 0)) {
1383 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1384 return IRQ_NONE;
1385 }
f5372251 1386 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1387
34f80b04 1388 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1389 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1390 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1391 return IRQ_HANDLED;
1392 }
1393
3196a88a
EG
1394#ifdef BNX2X_STOP_ON_ERROR
1395 if (unlikely(bp->panic))
1396 return IRQ_HANDLED;
1397#endif
1398
f2e0899f 1399 for_each_queue(bp, i) {
ca00392c 1400 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1401
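		/* bit 0 of the acked status is the slowpath SB and, with
		 * BCM_CNIC, bit 1 is the CNIC SB; each fastpath SB owns one
		 * bit above those, hence the shift by CNIC_CONTEXT_USE */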
523224a3 1402 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
ca00392c 1403 if (status & mask) {
54b9ddaa
VZ
1404 /* Handle Rx and Tx according to SB id */
1405 prefetch(fp->rx_cons_sb);
54b9ddaa 1406 prefetch(fp->tx_cons_sb);
523224a3 1407 prefetch(&fp->sb_running_index[SM_RX_ID]);
54b9ddaa 1408 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1409 status &= ~mask;
1410 }
a2fbb9ea
ET
1411 }
1412
993ac7b5 1413#ifdef BCM_CNIC
523224a3 1414 mask = 0x2;
993ac7b5
MC
1415 if (status & (mask | 0x1)) {
1416 struct cnic_ops *c_ops = NULL;
1417
1418 rcu_read_lock();
1419 c_ops = rcu_dereference(bp->cnic_ops);
1420 if (c_ops)
1421 c_ops->cnic_handler(bp->cnic_data, NULL);
1422 rcu_read_unlock();
1423
1424 status &= ~mask;
1425 }
1426#endif
a2fbb9ea 1427
34f80b04 1428 if (unlikely(status & 0x1)) {
1cf167f2 1429 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1430
1431 status &= ~0x1;
1432 if (!status)
1433 return IRQ_HANDLED;
1434 }
1435
cdaa7cb8
VZ
1436 if (unlikely(status))
1437 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1438 status);
a2fbb9ea 1439
c18487ee 1440 return IRQ_HANDLED;
a2fbb9ea
ET
1441}
1442
c18487ee 1443/* end of fast path */
a2fbb9ea 1444
a2fbb9ea 1445
c18487ee
YR
1446/* Link */
1447
1448/*
1449 * General service functions
1450 */
a2fbb9ea 1451
9f6c9258 1452int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1453{
1454 u32 lock_status;
1455 u32 resource_bit = (1 << resource);
4a37fb66
YG
1456 int func = BP_FUNC(bp);
1457 u32 hw_lock_control_reg;
c18487ee 1458 int cnt;
a2fbb9ea 1459
c18487ee
YR
1460 /* Validating that the resource is within range */
1461 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1462 DP(NETIF_MSG_HW,
1463 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1464 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1465 return -EINVAL;
1466 }
a2fbb9ea 1467
4a37fb66
YG
1468 if (func <= 5) {
1469 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1470 } else {
1471 hw_lock_control_reg =
1472 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1473 }
1474
c18487ee 1475 /* Validating that the resource is not already taken */
4a37fb66 1476 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1477 if (lock_status & resource_bit) {
1478 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1479 lock_status, resource_bit);
1480 return -EEXIST;
1481 }
a2fbb9ea 1482
46230476
EG
1483 /* Try for 5 second every 5ms */
1484 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1485 /* Try to acquire the lock */
4a37fb66
YG
1486 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1487 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1488 if (lock_status & resource_bit)
1489 return 0;
a2fbb9ea 1490
c18487ee 1491 msleep(5);
a2fbb9ea 1492 }
c18487ee
YR
1493 DP(NETIF_MSG_HW, "Timeout\n");
1494 return -EAGAIN;
1495}
a2fbb9ea 1496
9f6c9258 1497int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1498{
1499 u32 lock_status;
1500 u32 resource_bit = (1 << resource);
4a37fb66
YG
1501 int func = BP_FUNC(bp);
1502 u32 hw_lock_control_reg;
a2fbb9ea 1503
72fd0718
VZ
1504 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1505
c18487ee
YR
1506 /* Validating that the resource is within range */
1507 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1508 DP(NETIF_MSG_HW,
1509 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1510 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1511 return -EINVAL;
1512 }
1513
4a37fb66
YG
1514 if (func <= 5) {
1515 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1516 } else {
1517 hw_lock_control_reg =
1518 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1519 }
1520
c18487ee 1521 /* Validating that the resource is currently taken */
4a37fb66 1522 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1523 if (!(lock_status & resource_bit)) {
1524 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1525 lock_status, resource_bit);
1526 return -EFAULT;
a2fbb9ea
ET
1527 }
1528
9f6c9258
DK
1529 REG_WR(bp, hw_lock_control_reg, resource_bit);
1530 return 0;
c18487ee 1531}
a2fbb9ea 1532
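/* Illustrative sketch (not part of the original file): the usual
 * acquire/use/release pattern for a HW resource lock, as done for the GPIO
 * and SPIO accesses below.
 */
static inline void example_locked_gpio_read(struct bnx2x *bp, u32 *gpio_reg)
{
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	*gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}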
9f6c9258 1533
4acac6a5
EG
1534int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1535{
1536 /* The GPIO should be swapped if swap register is set and active */
1537 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1538 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1539 int gpio_shift = gpio_num +
1540 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1541 u32 gpio_mask = (1 << gpio_shift);
1542 u32 gpio_reg;
1543 int value;
1544
1545 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1546 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1547 return -EINVAL;
1548 }
1549
1550 /* read GPIO value */
1551 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1552
1553 /* get the requested pin value */
1554 if ((gpio_reg & gpio_mask) == gpio_mask)
1555 value = 1;
1556 else
1557 value = 0;
1558
1559 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1560
1561 return value;
1562}
1563
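/* Illustrative sketch (hypothetical helper, not in the driver): pulse a
 * GPIO low and release it back to input/float with the accessors below;
 * the 1ms hold time is an arbitrary example value.
 */
static void example_gpio_pulse(struct bnx2x *bp, int gpio_num, u8 port)
{
	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
	msleep(1);
	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
}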
17de50b7 1564int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1565{
1566 /* The GPIO should be swapped if swap register is set and active */
1567 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1568 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1569 int gpio_shift = gpio_num +
1570 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1571 u32 gpio_mask = (1 << gpio_shift);
1572 u32 gpio_reg;
a2fbb9ea 1573
c18487ee
YR
1574 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1575 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1576 return -EINVAL;
1577 }
a2fbb9ea 1578
4a37fb66 1579 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1580 /* read GPIO and mask except the float bits */
1581 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1582
c18487ee
YR
1583 switch (mode) {
1584 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1585 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1586 gpio_num, gpio_shift);
1587 /* clear FLOAT and set CLR */
1588 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1589 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1590 break;
a2fbb9ea 1591
c18487ee
YR
1592 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1593 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1594 gpio_num, gpio_shift);
1595 /* clear FLOAT and set SET */
1596 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1597 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1598 break;
a2fbb9ea 1599
17de50b7 1600 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1601 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1602 gpio_num, gpio_shift);
1603 /* set FLOAT */
1604 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1605 break;
a2fbb9ea 1606
c18487ee
YR
1607 default:
1608 break;
a2fbb9ea
ET
1609 }
1610
c18487ee 1611 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1612 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1613
c18487ee 1614 return 0;
a2fbb9ea
ET
1615}
1616
4acac6a5
EG
1617int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1618{
1619 /* The GPIO should be swapped if swap register is set and active */
1620 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1621 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1622 int gpio_shift = gpio_num +
1623 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1624 u32 gpio_mask = (1 << gpio_shift);
1625 u32 gpio_reg;
1626
1627 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1628 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1629 return -EINVAL;
1630 }
1631
1632 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1633 /* read GPIO int */
1634 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1635
1636 switch (mode) {
1637 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1638 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1639 "output low\n", gpio_num, gpio_shift);
1640 /* clear SET and set CLR */
1641 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1642 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1643 break;
1644
1645 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1646 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1647 "output high\n", gpio_num, gpio_shift);
1648 /* clear CLR and set SET */
1649 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1650 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1651 break;
1652
1653 default:
1654 break;
1655 }
1656
1657 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1658 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1659
1660 return 0;
1661}
1662
c18487ee 1663static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1664{
c18487ee
YR
1665 u32 spio_mask = (1 << spio_num);
1666 u32 spio_reg;
a2fbb9ea 1667
c18487ee
YR
1668 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1669 (spio_num > MISC_REGISTERS_SPIO_7)) {
1670 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1671 return -EINVAL;
a2fbb9ea
ET
1672 }
1673
4a37fb66 1674 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1675 /* read SPIO and mask except the float bits */
1676 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1677
c18487ee 1678 switch (mode) {
6378c025 1679 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1680 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1681 /* clear FLOAT and set CLR */
1682 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1683 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1684 break;
a2fbb9ea 1685
6378c025 1686 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1687 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1688 /* clear FLOAT and set SET */
1689 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1690 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1691 break;
a2fbb9ea 1692
c18487ee
YR
1693 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1694 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1695 /* set FLOAT */
1696 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1697 break;
a2fbb9ea 1698
c18487ee
YR
1699 default:
1700 break;
a2fbb9ea
ET
1701 }
1702
c18487ee 1703 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1704 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1705
a2fbb9ea
ET
1706 return 0;
1707}
1708
a22f0788
YR
1709int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1710{
1711 u32 sel_phy_idx = 0;
1712 if (bp->link_vars.link_up) {
1713 sel_phy_idx = EXT_PHY1;
1714 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1715 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1716 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1717 sel_phy_idx = EXT_PHY2;
1718 } else {
1719
1720 switch (bnx2x_phy_selection(&bp->link_params)) {
1721 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1722 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1723 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1724 sel_phy_idx = EXT_PHY1;
1725 break;
1726 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1727 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1728 sel_phy_idx = EXT_PHY2;
1729 break;
1730 }
1731 }
1732 /*
 1733 * The selected active PHY is always the one after swapping (in case
 1734 * PHY swapping is enabled), so when swapping is enabled we need to
 1735 * reverse the configuration.
1736 */
1737
1738 if (bp->link_params.multi_phy_config &
1739 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1740 if (sel_phy_idx == EXT_PHY1)
1741 sel_phy_idx = EXT_PHY2;
1742 else if (sel_phy_idx == EXT_PHY2)
1743 sel_phy_idx = EXT_PHY1;
1744 }
1745 return LINK_CONFIG_IDX(sel_phy_idx);
1746}
1747
9f6c9258 1748void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1749{
a22f0788 1750 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1751 switch (bp->link_vars.ieee_fc &
1752 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1753 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1754 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1755 ADVERTISED_Pause);
c18487ee 1756 break;
356e2385 1757
c18487ee 1758 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1759 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1760 ADVERTISED_Pause);
c18487ee 1761 break;
356e2385 1762
c18487ee 1763 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1764 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1765 break;
356e2385 1766
c18487ee 1767 default:
a22f0788 1768 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1769 ADVERTISED_Pause);
c18487ee
YR
1770 break;
1771 }
1772}
f1410647 1773
9f6c9258 1774u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1775{
19680c48
EG
1776 if (!BP_NOMCP(bp)) {
1777 u8 rc;
a22f0788
YR
1778 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1779 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1780 /* Initialize link parameters structure variables */
8c99e7b0
YR
1781 /* It is recommended to turn off RX FC for jumbo frames
1782 for better performance */
f2e0899f 1783 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1784 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1785 else
c0700f90 1786 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1787
4a37fb66 1788 bnx2x_acquire_phy_lock(bp);
b5bf9068 1789
a22f0788 1790 if (load_mode == LOAD_DIAG) {
de6eae1f 1791 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1792 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1793 }
b5bf9068 1794
19680c48 1795 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1796
4a37fb66 1797 bnx2x_release_phy_lock(bp);
a2fbb9ea 1798
3c96c68b
EG
1799 bnx2x_calc_fc_adv(bp);
1800
b5bf9068
EG
1801 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1802 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1803 bnx2x_link_report(bp);
b5bf9068 1804 }
a22f0788 1805 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1806 return rc;
1807 }
f5372251 1808 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1809 return -EINVAL;
a2fbb9ea
ET
1810}
1811
9f6c9258 1812void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1813{
19680c48 1814 if (!BP_NOMCP(bp)) {
4a37fb66 1815 bnx2x_acquire_phy_lock(bp);
54c2fb78 1816 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1817 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1818 bnx2x_release_phy_lock(bp);
a2fbb9ea 1819
19680c48
EG
1820 bnx2x_calc_fc_adv(bp);
1821 } else
f5372251 1822 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1823}
a2fbb9ea 1824
c18487ee
YR
1825static void bnx2x__link_reset(struct bnx2x *bp)
1826{
19680c48 1827 if (!BP_NOMCP(bp)) {
4a37fb66 1828 bnx2x_acquire_phy_lock(bp);
589abe3a 1829 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1830 bnx2x_release_phy_lock(bp);
19680c48 1831 } else
f5372251 1832 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1833}
a2fbb9ea 1834
a22f0788 1835u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1836{
2145a920 1837 u8 rc = 0;
a2fbb9ea 1838
2145a920
VZ
1839 if (!BP_NOMCP(bp)) {
1840 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1841 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1842 is_serdes);
2145a920
VZ
1843 bnx2x_release_phy_lock(bp);
1844 } else
1845 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1846
c18487ee
YR
1847 return rc;
1848}
a2fbb9ea 1849
8a1c38d1 1850static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1851{
8a1c38d1
EG
1852 u32 r_param = bp->link_vars.line_speed / 8;
1853 u32 fair_periodic_timeout_usec;
1854 u32 t_fair;
34f80b04 1855
8a1c38d1
EG
1856 memset(&(bp->cmng.rs_vars), 0,
1857 sizeof(struct rate_shaping_vars_per_port));
1858 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1859
8a1c38d1
EG
1860 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1861 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1862
8a1c38d1
EG
 1863 /* this is the threshold below which no timer arming will occur.
 1864 The 1.25 coefficient makes the threshold a little bigger than
 1865 the real time, to compensate for timer inaccuracy */
1866 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1867 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
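	/* Worked example (assuming a 10 Gbps link and the 100 usec period
	 * noted above): r_param = 10000 / 8 = 1250 bytes/usec, so
	 * rs_threshold = 100 * 1250 * 5 / 4 = 156250 bytes */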
1868
8a1c38d1
EG
1869 /* resolution of fairness timer */
1870 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1871 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1872 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1873
8a1c38d1
EG
1874 /* this is the threshold below which we won't arm the timer anymore */
1875 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1876
8a1c38d1
EG
 1877 /* we multiply by 1e3/8 to get bytes/msec.
 1878 We don't want the credit to exceed
 1879 t_fair*FAIR_MEM (the algorithm resolution) */
1880 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1881 /* since each tick is 4 usec */
1882 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1883}
1884
2691d51d
EG
 1885 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
 1886 It's needed for further normalization of the min_rates.
 1887 The result is:
 1888 the sum of vn_min_rates,
 1889 or
 1890 0 - if all the min_rates are 0.
 1891 In the latter case the fairness algorithm should be deactivated.
 1892 If not all min_rates are zero then those that are zero will be set to 1.
1893 */
1894static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1895{
1896 int all_zero = 1;
2691d51d
EG
1897 int vn;
1898
1899 bp->vn_weight_sum = 0;
1900 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1901 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1902 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1903 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1904
1905 /* Skip hidden vns */
1906 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1907 continue;
1908
1909 /* If min rate is zero - set it to 1 */
1910 if (!vn_min_rate)
1911 vn_min_rate = DEF_MIN_RATE;
1912 else
1913 all_zero = 0;
1914
1915 bp->vn_weight_sum += vn_min_rate;
1916 }
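	/* Illustrative example: min BW fields of 10%, 30%, 0%, 0% yield
	 * vn_min_rates of 1000, 3000, DEF_MIN_RATE, DEF_MIN_RATE (Mbps),
	 * so vn_weight_sum is non-zero and fairness stays enabled */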
1917
1918 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1919 if (all_zero) {
1920 bp->cmng.flags.cmng_enables &=
1921 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 1922 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
 1923 " fairness will be disabled\n");
1924 } else
1925 bp->cmng.flags.cmng_enables |=
1926 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1927}
1928
f2e0899f 1929static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1930{
1931 struct rate_shaping_vars_per_vn m_rs_vn;
1932 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1933 u32 vn_cfg = bp->mf_config[vn];
1934 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1935 u16 vn_min_rate, vn_max_rate;
1936 int i;
1937
1938 /* If function is hidden - set min and max to zeroes */
1939 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1940 vn_min_rate = 0;
1941 vn_max_rate = 0;
1942
1943 } else {
1944 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1945 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1 1946 /* If min rate is zero - set it to 1 */
f2e0899f 1947 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
1948 vn_min_rate = DEF_MIN_RATE;
1949 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1950 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1951 }
f85582f8 1952
8a1c38d1 1953 DP(NETIF_MSG_IFUP,
b015e3d1 1954 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1955 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1956
1957 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1958 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1959
1960 /* global vn counter - maximal Mbps for this vn */
1961 m_rs_vn.vn_counter.rate = vn_max_rate;
1962
1963 /* quota - number of bytes transmitted in this period */
1964 m_rs_vn.vn_counter.quota =
1965 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
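	/* e.g. (illustrative): a vn_max_rate of 2500 Mbps gives a quota of
	 * 2500 * 100 / 8 = 31250 bytes per 100 usec period */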
1966
8a1c38d1 1967 if (bp->vn_weight_sum) {
34f80b04
EG
1968 /* credit for each period of the fairness algorithm:
 1969 number of bytes in T_FAIR (the vn shares the port rate).
8a1c38d1
EG
1970 vn_weight_sum should not be larger than 10000, thus
1971 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1972 than zero */
34f80b04 1973 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1974 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1975 (8 * bp->vn_weight_sum))),
1976 (bp->cmng.fair_vars.fair_threshold * 2));
1977 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1978 m_fair_vn.vn_credit_delta);
1979 }
1980
34f80b04
EG
1981 /* Store it to internal memory */
1982 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1983 REG_WR(bp, BAR_XSTRORM_INTMEM +
1984 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1985 ((u32 *)(&m_rs_vn))[i]);
1986
1987 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1988 REG_WR(bp, BAR_XSTRORM_INTMEM +
1989 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1990 ((u32 *)(&m_fair_vn))[i]);
1991}
f85582f8 1992
523224a3
DK
1993static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1994{
1995 if (CHIP_REV_IS_SLOW(bp))
1996 return CMNG_FNS_NONE;
fb3bff17 1997 if (IS_MF(bp))
523224a3
DK
1998 return CMNG_FNS_MINMAX;
1999
2000 return CMNG_FNS_NONE;
2001}
2002
2003static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2004{
2005 int vn;
2006
2007 if (BP_NOMCP(bp))
 2008 return; /* what should be the default value in this case? */
2009
2010 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2011 int /*abs*/func = 2*vn + BP_PORT(bp);
f2e0899f 2012 bp->mf_config[vn] =
523224a3
DK
2013 MF_CFG_RD(bp, func_mf_config[func].config);
2014 }
2015}
2016
2017static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2018{
2019
2020 if (cmng_type == CMNG_FNS_MINMAX) {
2021 int vn;
2022
2023 /* clear cmng_enables */
2024 bp->cmng.flags.cmng_enables = 0;
2025
2026 /* read mf conf from shmem */
2027 if (read_cfg)
2028 bnx2x_read_mf_cfg(bp);
2029
2030 /* Init rate shaping and fairness contexts */
2031 bnx2x_init_port_minmax(bp);
2032
2033 /* vn_weight_sum and enable fairness if not 0 */
2034 bnx2x_calc_vn_weight_sum(bp);
2035
2036 /* calculate and set min-max rate for each vn */
2037 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2038 bnx2x_init_vn_minmax(bp, vn);
2039
2040 /* always enable rate shaping and fairness */
2041 bp->cmng.flags.cmng_enables |=
2042 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2043 if (!bp->vn_weight_sum)
 2044 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
 2045 " fairness will be disabled\n");
2046 return;
2047 }
2048
2049 /* rate shaping and fairness are disabled */
2050 DP(NETIF_MSG_IFUP,
2051 "rate shaping and fairness are disabled\n");
2052}
34f80b04 2053
523224a3
DK
2054static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2055{
2056 int port = BP_PORT(bp);
2057 int func;
2058 int vn;
2059
2060 /* Set the attention towards other drivers on the same port */
2061 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2062 if (vn == BP_E1HVN(bp))
2063 continue;
2064
2065 func = ((vn << 1) | port);
2066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2067 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2068 }
2069}
8a1c38d1 2070
c18487ee
YR
2071/* This function is called upon link interrupt */
2072static void bnx2x_link_attn(struct bnx2x *bp)
2073{
d9e8b185 2074 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2075 /* Make sure that we are synced with the current statistics */
2076 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2077
c18487ee 2078 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2079
bb2a0f7a
YG
2080 if (bp->link_vars.link_up) {
2081
1c06328c 2082 /* dropless flow control */
f2e0899f 2083 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2084 int port = BP_PORT(bp);
2085 u32 pause_enabled = 0;
2086
2087 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2088 pause_enabled = 1;
2089
2090 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2091 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2092 pause_enabled);
2093 }
2094
bb2a0f7a
YG
2095 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2096 struct host_port_stats *pstats;
2097
2098 pstats = bnx2x_sp(bp, port_stats);
2099 /* reset old bmac stats */
2100 memset(&(pstats->mac_stx[0]), 0,
2101 sizeof(struct mac_stx));
2102 }
f34d28ea 2103 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2104 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2105 }
2106
d9e8b185
VZ
2107 /* indicate link status only if link status actually changed */
2108 if (prev_link_status != bp->link_vars.link_status)
2109 bnx2x_link_report(bp);
34f80b04 2110
f2e0899f
DK
2111 if (IS_MF(bp))
2112 bnx2x_link_sync_notify(bp);
34f80b04 2113
f2e0899f
DK
2114 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2115 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2116
f2e0899f
DK
2117 if (cmng_fns != CMNG_FNS_NONE) {
2118 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2119 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2120 } else
2121 /* rate shaping and fairness are disabled */
2122 DP(NETIF_MSG_IFUP,
2123 "single function mode without fairness\n");
34f80b04 2124 }
c18487ee 2125}
a2fbb9ea 2126
9f6c9258 2127void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2128{
f34d28ea 2129 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2130 return;
a2fbb9ea 2131
c18487ee 2132 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2133
bb2a0f7a
YG
2134 if (bp->link_vars.link_up)
2135 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2136 else
2137 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2138
f2e0899f
DK
 2139 /* the link status update could be the result of a DCC event,
 2140 hence re-read the shmem mf configuration */
2141 bnx2x_read_mf_cfg(bp);
2691d51d 2142
c18487ee
YR
2143 /* indicate link status */
2144 bnx2x_link_report(bp);
a2fbb9ea 2145}
a2fbb9ea 2146
34f80b04
EG
2147static void bnx2x_pmf_update(struct bnx2x *bp)
2148{
2149 int port = BP_PORT(bp);
2150 u32 val;
2151
2152 bp->port.pmf = 1;
2153 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2154
2155 /* enable nig attention */
2156 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2157 if (bp->common.int_block == INT_BLOCK_HC) {
2158 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2159 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2160 } else if (CHIP_IS_E2(bp)) {
2161 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2162 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2163 }
bb2a0f7a
YG
2164
2165 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2166}
2167
c18487ee 2168/* end of Link */
a2fbb9ea
ET
2169
2170/* slow path */
2171
2172/*
2173 * General service functions
2174 */
2175
2691d51d 2176/* send the MCP a request, block until there is a reply */
a22f0788 2177u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2178{
f2e0899f 2179 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2180 u32 seq = ++bp->fw_seq;
2181 u32 rc = 0;
2182 u32 cnt = 1;
2183 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2184
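	/* Handshake sketch: bump the driver sequence number, write
	 * command|seq to the driver mailbox, then poll the FW mailbox until
	 * the FW echoes the same sequence number back (or ~5 s elapse) */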
c4ff7cbf 2185 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2186 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2187 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2188
2691d51d
EG
2189 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2190
2191 do {
 2192 /* let the FW do its magic ... */
2193 msleep(delay);
2194
f2e0899f 2195 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2196
c4ff7cbf
EG
 2197 /* Give the FW up to 5 seconds (500*10ms) */
2198 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2199
2200 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2201 cnt*delay, rc, seq);
2202
2203 /* is this a reply to our command? */
2204 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2205 rc &= FW_MSG_CODE_MASK;
2206 else {
2207 /* FW BUG! */
2208 BNX2X_ERR("FW failed to respond!\n");
2209 bnx2x_fw_dump(bp);
2210 rc = 0;
2211 }
c4ff7cbf 2212 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2213
2214 return rc;
2215}
2216
523224a3
DK
2217/* must be called under rtnl_lock */
2218void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2219{
523224a3 2220 u32 mask = (1 << cl_id);
2691d51d 2221
523224a3
DK
 2222 /* initial setting is BNX2X_ACCEPT_NONE */
2223 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2224 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2225 u8 unmatched_unicast = 0;
2691d51d 2226
523224a3
DK
2227 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2228 /* promiscuous - accept all, drop none */
2229 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2230 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2231 }
2232 if (filters & BNX2X_ACCEPT_UNICAST) {
2233 /* accept matched ucast */
2234 drop_all_ucast = 0;
2235 }
2236 if (filters & BNX2X_ACCEPT_MULTICAST) {
2237 /* accept matched mcast */
2238 drop_all_mcast = 0;
2239 }
2240 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 2241 /* accept all ucast */
2242 drop_all_ucast = 0;
2243 accp_all_ucast = 1;
2244 }
2245 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2246 /* accept all mcast */
2247 drop_all_mcast = 0;
2248 accp_all_mcast = 1;
2249 }
2250 if (filters & BNX2X_ACCEPT_BROADCAST) {
2251 /* accept (all) bcast */
2252 drop_all_bcast = 0;
2253 accp_all_bcast = 1;
2254 }
2691d51d 2255
523224a3
DK
2256 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2257 bp->mac_filters.ucast_drop_all | mask :
2258 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2259
523224a3
DK
2260 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2261 bp->mac_filters.mcast_drop_all | mask :
2262 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2263
523224a3
DK
2264 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2265 bp->mac_filters.bcast_drop_all | mask :
2266 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2267
523224a3
DK
2268 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2269 bp->mac_filters.ucast_accept_all | mask :
2270 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2271
523224a3
DK
2272 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2273 bp->mac_filters.mcast_accept_all | mask :
2274 bp->mac_filters.mcast_accept_all & ~mask;
2275
2276 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2277 bp->mac_filters.bcast_accept_all | mask :
2278 bp->mac_filters.bcast_accept_all & ~mask;
2279
2280 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2281 bp->mac_filters.unmatched_unicast | mask :
2282 bp->mac_filters.unmatched_unicast & ~mask;
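	/* Each mac_filters field above is a per-client bitmask: every
	 * ternary either sets or clears only this client's bit (mask),
	 * leaving the other clients' filter state untouched */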
2691d51d
EG
2283}
2284
523224a3 2285void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2286{
523224a3
DK
2287 if (FUNC_CONFIG(p->func_flgs)) {
2288 struct tstorm_eth_function_common_config tcfg = {0};
2691d51d 2289
523224a3
DK
2290 /* tpa */
2291 if (p->func_flgs & FUNC_FLG_TPA)
2292 tcfg.config_flags |=
2293 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2294
523224a3
DK
2295 /* set rss flags */
2296 if (p->func_flgs & FUNC_FLG_RSS) {
2297 u16 rss_flgs = (p->rss->mode <<
2298 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2691d51d 2299
523224a3
DK
2300 if (p->rss->cap & RSS_IPV4_CAP)
2301 rss_flgs |= RSS_IPV4_CAP_MASK;
2302 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2303 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2304 if (p->rss->cap & RSS_IPV6_CAP)
2305 rss_flgs |= RSS_IPV6_CAP_MASK;
2306 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2307 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2691d51d 2308
523224a3
DK
2309 tcfg.config_flags |= rss_flgs;
2310 tcfg.rss_result_mask = p->rss->result_mask;
2691d51d 2311
2691d51d
EG
2312 }
2313
523224a3 2314 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2315 }
2691d51d 2316
523224a3
DK
2317 /* Enable the function in the FW */
2318 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2319 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2320
523224a3
DK
2321 /* statistics */
2322 if (p->func_flgs & FUNC_FLG_STATS) {
2323 struct stats_indication_flags stats_flags = {0};
2324 stats_flags.collect_eth = 1;
2691d51d 2325
523224a3
DK
2326 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2327 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2328
523224a3
DK
2329 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2330 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2331
523224a3
DK
2332 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2333 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2334
523224a3
DK
2335 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2336 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2337 }
2338
523224a3
DK
2339 /* spq */
2340 if (p->func_flgs & FUNC_FLG_SPQ) {
2341 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2342 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2343 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2344 }
2691d51d
EG
2345}
2346
523224a3
DK
2347static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2348 struct bnx2x_fastpath *fp)
28912902 2349{
523224a3 2350 u16 flags = 0;
28912902 2351
523224a3
DK
2352 /* calculate queue flags */
2353 flags |= QUEUE_FLG_CACHE_ALIGN;
2354 flags |= QUEUE_FLG_HC;
fb3bff17 2355 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
28912902 2356
523224a3
DK
2357#ifdef BCM_VLAN
2358 flags |= QUEUE_FLG_VLAN;
2359 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2360#endif
2361
2362 if (!fp->disable_tpa)
2363 flags |= QUEUE_FLG_TPA;
2364
2365 flags |= QUEUE_FLG_STATS;
2366
2367 return flags;
2368}
2369
2370static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2371 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2372 struct bnx2x_rxq_init_params *rxq_init)
2373{
2374 u16 max_sge = 0;
2375 u16 sge_sz = 0;
2376 u16 tpa_agg_size = 0;
2377
2378 /* calculate queue flags */
2379 u16 flags = bnx2x_get_cl_flags(bp, fp);
2380
2381 if (!fp->disable_tpa) {
2382 pause->sge_th_hi = 250;
2383 pause->sge_th_lo = 150;
2384 tpa_agg_size = min_t(u32,
2385 (min_t(u32, 8, MAX_SKB_FRAGS) *
2386 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2387 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2388 SGE_PAGE_SHIFT;
2389 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2390 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2391 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2392 0xffff);
2393 }
2394
2395 /* pause - not for e1 */
2396 if (!CHIP_IS_E1(bp)) {
2397 pause->bd_th_hi = 350;
2398 pause->bd_th_lo = 250;
2399 pause->rcq_th_hi = 350;
2400 pause->rcq_th_lo = 250;
2401 pause->sge_th_hi = 0;
2402 pause->sge_th_lo = 0;
2403 pause->pri_map = 1;
2404 }
2405
2406 /* rxq setup */
2407 rxq_init->flags = flags;
2408 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2409 rxq_init->dscr_map = fp->rx_desc_mapping;
2410 rxq_init->sge_map = fp->rx_sge_mapping;
2411 rxq_init->rcq_map = fp->rx_comp_mapping;
2412 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2413 rxq_init->mtu = bp->dev->mtu;
2414 rxq_init->buf_sz = bp->rx_buf_size;
2415 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2416 rxq_init->cl_id = fp->cl_id;
2417 rxq_init->spcl_id = fp->cl_id;
2418 rxq_init->stat_id = fp->cl_id;
2419 rxq_init->tpa_agg_sz = tpa_agg_size;
2420 rxq_init->sge_buf_sz = sge_sz;
2421 rxq_init->max_sges_pkt = max_sge;
2422 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2423 rxq_init->fw_sb_id = fp->fw_sb_id;
2424
2425 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2426
2427 rxq_init->cid = HW_CID(bp, fp->cid);
2428
2429 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2430}
2431
2432static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2433 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2434{
2435 u16 flags = bnx2x_get_cl_flags(bp, fp);
2436
2437 txq_init->flags = flags;
2438 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2439 txq_init->dscr_map = fp->tx_desc_mapping;
2440 txq_init->stat_id = fp->cl_id;
2441 txq_init->cid = HW_CID(bp, fp->cid);
2442 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2443 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2444 txq_init->fw_sb_id = fp->fw_sb_id;
2445 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2446}
2447
2448void bnx2x_pf_init(struct bnx2x *bp)
2449{
2450 struct bnx2x_func_init_params func_init = {0};
2451 struct bnx2x_rss_params rss = {0};
2452 struct event_ring_data eq_data = { {0} };
2453 u16 flags;
2454
2455 /* pf specific setups */
2456 if (!CHIP_IS_E1(bp))
fb3bff17 2457 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2458
f2e0899f
DK
2459 if (CHIP_IS_E2(bp)) {
2460 /* reset IGU PF statistics: MSIX + ATTN */
2461 /* PF */
2462 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2463 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2464 (CHIP_MODE_IS_4_PORT(bp) ?
2465 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2466 /* ATTN */
2467 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2468 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2469 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2470 (CHIP_MODE_IS_4_PORT(bp) ?
2471 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2472 }
2473
523224a3
DK
2474 /* function setup flags */
2475 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2476
f2e0899f
DK
2477 if (CHIP_IS_E1x(bp))
2478 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2479 else
2480 flags |= FUNC_FLG_TPA;
523224a3
DK
2481
2482 /**
 2483 * Although RSS is meaningless when there is a single HW queue, we
2484 * still need it enabled in order to have HW Rx hash generated.
2485 *
2486 * if (is_eth_multi(bp))
2487 * flags |= FUNC_FLG_RSS;
2488 */
f0b9f472 2489 flags |= FUNC_FLG_RSS;
523224a3
DK
2490
2491 /* function setup */
2492 if (flags & FUNC_FLG_RSS) {
2493 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2494 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2495 rss.mode = bp->multi_mode;
2496 rss.result_mask = MULTI_MASK;
2497 func_init.rss = &rss;
2498 }
2499
2500 func_init.func_flgs = flags;
2501 func_init.pf_id = BP_FUNC(bp);
2502 func_init.func_id = BP_FUNC(bp);
2503 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2504 func_init.spq_map = bp->spq_mapping;
2505 func_init.spq_prod = bp->spq_prod_idx;
2506
2507 bnx2x_func_init(bp, &func_init);
2508
2509 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2510
2511 /*
 2512 Congestion management values depend on the link rate.
 2513 There is no active link yet, so the initial link rate is set to 10 Gbps.
 2514 When the link comes up, the congestion management values are
 2515 re-calculated according to the actual link rate.
2516 */
2517 bp->link_vars.line_speed = SPEED_10000;
2518 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2519
2520 /* Only the PMF sets the HW */
2521 if (bp->port.pmf)
2522 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2523
2524 /* no rx until link is up */
2525 bp->rx_mode = BNX2X_RX_MODE_NONE;
2526 bnx2x_set_storm_rx_mode(bp);
2527
2528 /* init Event Queue */
2529 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2530 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2531 eq_data.producer = bp->eq_prod;
2532 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2533 eq_data.sb_id = DEF_SB_ID;
2534 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2535}
2536
2537
2538static void bnx2x_e1h_disable(struct bnx2x *bp)
2539{
2540 int port = BP_PORT(bp);
2541
2542 netif_tx_disable(bp->dev);
2543
2544 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2545
2546 netif_carrier_off(bp->dev);
2547}
2548
2549static void bnx2x_e1h_enable(struct bnx2x *bp)
2550{
2551 int port = BP_PORT(bp);
2552
2553 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2554
 2555 /* Tx queues should only be re-enabled */
2556 netif_tx_wake_all_queues(bp->dev);
2557
2558 /*
 2559 * Do not call netif_carrier_on here, since it will be called by the
 2560 * link state check if the link is up
2561 */
2562}
2563
2564static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2565{
2566 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2567
2568 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2569
2570 /*
2571 * This is the only place besides the function initialization
2572 * where the bp->flags can change so it is done without any
2573 * locks
2574 */
f2e0899f 2575 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2576 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2577 bp->flags |= MF_FUNC_DIS;
2578
2579 bnx2x_e1h_disable(bp);
2580 } else {
2581 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2582 bp->flags &= ~MF_FUNC_DIS;
2583
2584 bnx2x_e1h_enable(bp);
2585 }
2586 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2587 }
2588 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2589
2590 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2591 bnx2x_link_sync_notify(bp);
2592 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2593 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2594 }
2595
2596 /* Report results to MCP */
2597 if (dcc_event)
2598 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2599 else
2600 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2601}
2602
2603/* must be called under the spq lock */
2604static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2605{
2606 struct eth_spe *next_spe = bp->spq_prod_bd;
2607
2608 if (bp->spq_prod_bd == bp->spq_last_bd) {
2609 bp->spq_prod_bd = bp->spq;
2610 bp->spq_prod_idx = 0;
2611 DP(NETIF_MSG_TIMER, "end of spq\n");
2612 } else {
2613 bp->spq_prod_bd++;
2614 bp->spq_prod_idx++;
2615 }
2616 return next_spe;
2617}
2618
2619/* must be called under the spq lock */
28912902
MC
2620static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2621{
2622 int func = BP_FUNC(bp);
2623
2624 /* Make sure that BD data is updated before writing the producer */
2625 wmb();
2626
523224a3 2627 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2628 bp->spq_prod_idx);
28912902
MC
2629 mmiowb();
2630}
2631
a2fbb9ea 2632/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2633int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2634 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2635{
28912902 2636 struct eth_spe *spe;
523224a3 2637 u16 type;
a2fbb9ea 2638
a2fbb9ea
ET
2639#ifdef BNX2X_STOP_ON_ERROR
2640 if (unlikely(bp->panic))
2641 return -EIO;
2642#endif
2643
34f80b04 2644 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2645
8fe23fbd 2646 if (!atomic_read(&bp->spq_left)) {
a2fbb9ea 2647 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2648 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2649 bnx2x_panic();
2650 return -EBUSY;
2651 }
f1410647 2652
28912902
MC
2653 spe = bnx2x_sp_get_next(bp);
2654
a2fbb9ea 2655 /* CID needs port number to be encoded into it */
28912902 2656 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2657 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2658 HW_CID(bp, cid));
523224a3 2659
a2fbb9ea 2660 if (common)
523224a3
DK
2661 /* Common ramrods:
2662 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2663 * TRAFFIC_STOP, TRAFFIC_START
2664 */
2665 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2666 & SPE_HDR_CONN_TYPE;
2667 else
2668 /* ETH ramrods: SETUP, HALT */
2669 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2670 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2671
523224a3
DK
2672 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2673 SPE_HDR_FUNCTION_ID);
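	/* The 16-bit SPE header type is thus a packed field: the connection
	 * type (ETH or NONE) plus the issuing function id, each in its
	 * shifted position */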
a2fbb9ea 2674
523224a3
DK
2675 spe->hdr.type = cpu_to_le16(type);
2676
2677 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2678 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2679
 2680 /* stats ramrod has its own slot on the spq */
 2681 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
 2682 /* It's ok if the actual decrement is issued towards the memory
 2683 * somewhere between the spin_lock and spin_unlock. Thus no
 2684 * more explicit memory barrier is needed.
2685 */
8fe23fbd 2686 atomic_dec(&bp->spq_left);
a2fbb9ea 2687
cdaa7cb8 2688 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3
DK
2689 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2690 "type(0x%x) left %x\n",
cdaa7cb8
VZ
2691 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2692 (u32)(U64_LO(bp->spq_mapping) +
2693 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
8fe23fbd 2694 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
cdaa7cb8 2695
28912902 2696 bnx2x_sp_prod_update(bp);
34f80b04 2697 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2698 return 0;
2699}
2700
2701/* acquire split MCP access lock register */
4a37fb66 2702static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2703{
72fd0718 2704 u32 j, val;
34f80b04 2705 int rc = 0;
a2fbb9ea
ET
2706
2707 might_sleep();
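	/* Lock protocol: write bit 31 of the MCP access lock register and
	 * read it back; it reads back as set only once we own the lock.
	 * Retry every 5 ms, i.e. up to ~5 seconds over 1000 iterations */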
72fd0718 2708 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2709 val = (1UL << 31);
2710 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2711 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2712 if (val & (1L << 31))
2713 break;
2714
2715 msleep(5);
2716 }
a2fbb9ea 2717 if (!(val & (1L << 31))) {
19680c48 2718 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2719 rc = -EBUSY;
2720 }
2721
2722 return rc;
2723}
2724
4a37fb66
YG
2725/* release split MCP access lock register */
2726static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2727{
72fd0718 2728 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2729}
2730
523224a3
DK
2731#define BNX2X_DEF_SB_ATT_IDX 0x0001
2732#define BNX2X_DEF_SB_IDX 0x0002
2733
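/* Returned by bnx2x_update_dsb_idx() below as a bitmask indicating which
 * of the default status block indices (attention / slowpath) advanced
 * since the last pass.
 */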
a2fbb9ea
ET
2734static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2735{
523224a3 2736 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2737 u16 rc = 0;
2738
2739 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2740 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2741 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2742 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2743 }
523224a3
DK
2744
2745 if (bp->def_idx != def_sb->sp_sb.running_index) {
2746 bp->def_idx = def_sb->sp_sb.running_index;
2747 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2748 }
523224a3
DK
2749
 2750 /* Do not reorder: indices reading should complete before handling */
2751 barrier();
a2fbb9ea
ET
2752 return rc;
2753}
2754
2755/*
2756 * slow path service functions
2757 */
2758
2759static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2760{
34f80b04 2761 int port = BP_PORT(bp);
a2fbb9ea
ET
2762 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2763 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2764 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2765 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2766 u32 aeu_mask;
87942b46 2767 u32 nig_mask = 0;
f2e0899f 2768 u32 reg_addr;
a2fbb9ea 2769
a2fbb9ea
ET
2770 if (bp->attn_state & asserted)
2771 BNX2X_ERR("IGU ERROR\n");
2772
3fcaf2e5
EG
2773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774 aeu_mask = REG_RD(bp, aeu_addr);
2775
a2fbb9ea 2776 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2777 aeu_mask, asserted);
72fd0718 2778 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2779 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2780
3fcaf2e5
EG
2781 REG_WR(bp, aeu_addr, aeu_mask);
2782 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2783
3fcaf2e5 2784 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2785 bp->attn_state |= asserted;
3fcaf2e5 2786 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2787
2788 if (asserted & ATTN_HARD_WIRED_MASK) {
2789 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2790
a5e9a7cf
EG
2791 bnx2x_acquire_phy_lock(bp);
2792
877e9aa4 2793 /* save nig interrupt mask */
87942b46 2794 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2795 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2796
c18487ee 2797 bnx2x_link_attn(bp);
a2fbb9ea
ET
2798
2799 /* handle unicore attn? */
2800 }
2801 if (asserted & ATTN_SW_TIMER_4_FUNC)
2802 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2803
2804 if (asserted & GPIO_2_FUNC)
2805 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2806
2807 if (asserted & GPIO_3_FUNC)
2808 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2809
2810 if (asserted & GPIO_4_FUNC)
2811 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2812
2813 if (port == 0) {
2814 if (asserted & ATTN_GENERAL_ATTN_1) {
2815 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2816 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2817 }
2818 if (asserted & ATTN_GENERAL_ATTN_2) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2821 }
2822 if (asserted & ATTN_GENERAL_ATTN_3) {
2823 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2824 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2825 }
2826 } else {
2827 if (asserted & ATTN_GENERAL_ATTN_4) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2830 }
2831 if (asserted & ATTN_GENERAL_ATTN_5) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2834 }
2835 if (asserted & ATTN_GENERAL_ATTN_6) {
2836 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2837 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2838 }
2839 }
2840
2841 } /* if hardwired */
2842
f2e0899f
DK
2843 if (bp->common.int_block == INT_BLOCK_HC)
2844 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2845 COMMAND_REG_ATTN_BITS_SET);
2846 else
2847 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2848
2849 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2850 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2851 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2852
2853 /* now set back the mask */
a5e9a7cf 2854 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2855 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2856 bnx2x_release_phy_lock(bp);
2857 }
a2fbb9ea
ET
2858}
2859
fd4ef40d
EG
2860static inline void bnx2x_fan_failure(struct bnx2x *bp)
2861{
2862 int port = BP_PORT(bp);
b7737c9b 2863 u32 ext_phy_config;
fd4ef40d 2864 /* mark the failure */
b7737c9b
YR
2865 ext_phy_config =
2866 SHMEM_RD(bp,
2867 dev_info.port_hw_config[port].external_phy_config);
2868
2869 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2870 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2871 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2872 ext_phy_config);
fd4ef40d
EG
2873
2874 /* log the failure */
cdaa7cb8
VZ
2875 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2876 " the driver to shutdown the card to prevent permanent"
2877 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2878}
ab6ad5a4 2879
877e9aa4 2880static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2881{
34f80b04 2882 int port = BP_PORT(bp);
877e9aa4 2883 int reg_offset;
d90d96ba 2884 u32 val;
877e9aa4 2885
34f80b04
EG
2886 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2887 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2888
34f80b04 2889 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2890
2891 val = REG_RD(bp, reg_offset);
2892 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2893 REG_WR(bp, reg_offset, val);
2894
2895 BNX2X_ERR("SPIO5 hw attention\n");
2896
fd4ef40d 2897 /* Fan failure attention */
d90d96ba 2898 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2899 bnx2x_fan_failure(bp);
877e9aa4 2900 }
34f80b04 2901
589abe3a
EG
2902 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2903 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2904 bnx2x_acquire_phy_lock(bp);
2905 bnx2x_handle_module_detect_int(&bp->link_params);
2906 bnx2x_release_phy_lock(bp);
2907 }
2908
34f80b04
EG
2909 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2910
2911 val = REG_RD(bp, reg_offset);
2912 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2913 REG_WR(bp, reg_offset, val);
2914
2915 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2916 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2917 bnx2x_panic();
2918 }
877e9aa4
ET
2919}
2920
2921static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2922{
2923 u32 val;
2924
0626b899 2925 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2926
2927 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2928 BNX2X_ERR("DB hw attention 0x%x\n", val);
2929 /* DORQ discard attention */
2930 if (val & 0x2)
2931 BNX2X_ERR("FATAL error from DORQ\n");
2932 }
34f80b04
EG
2933
2934 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2935
2936 int port = BP_PORT(bp);
2937 int reg_offset;
2938
2939 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2940 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2941
2942 val = REG_RD(bp, reg_offset);
2943 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2944 REG_WR(bp, reg_offset, val);
2945
2946 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2947 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2948 bnx2x_panic();
2949 }
877e9aa4
ET
2950}
2951
2952static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2953{
2954 u32 val;
2955
2956 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2957
2958 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2959 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2960 /* CFC error attention */
2961 if (val & 0x2)
2962 BNX2X_ERR("FATAL error from CFC\n");
2963 }
2964
2965 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2966
2967 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2968 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2969 /* RQ_USDMDP_FIFO_OVERFLOW */
2970 if (val & 0x18000)
2971 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
2972 if (CHIP_IS_E2(bp)) {
2973 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2974 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2975 }
877e9aa4 2976 }
34f80b04
EG
2977
2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2979
2980 int port = BP_PORT(bp);
2981 int reg_offset;
2982
2983 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2985
2986 val = REG_RD(bp, reg_offset);
2987 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988 REG_WR(bp, reg_offset, val);
2989
2990 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 2991 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
2992 bnx2x_panic();
2993 }
877e9aa4
ET
2994}
2995
2996static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2997{
34f80b04
EG
2998 u32 val;
2999
877e9aa4
ET
3000 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3001
34f80b04
EG
3002 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003 int func = BP_FUNC(bp);
3004
3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3006 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3007 func_mf_config[BP_ABS_FUNC(bp)].config);
3008 val = SHMEM_RD(bp,
3009 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3010 if (val & DRV_STATUS_DCC_EVENT_MASK)
3011 bnx2x_dcc_event(bp,
3012 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3013 bnx2x__link_status_update(bp);
2691d51d 3014 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3015 bnx2x_pmf_update(bp);
3016
3017 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3018
3019 BNX2X_ERR("MC assert!\n");
3020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3021 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3022 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3023 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3024 bnx2x_panic();
3025
3026 } else if (attn & BNX2X_MCP_ASSERT) {
3027
3028 BNX2X_ERR("MCP assert!\n");
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3030 bnx2x_fw_dump(bp);
877e9aa4
ET
3031
3032 } else
3033 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3034 }
3035
3036 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3037 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3038 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3039 val = CHIP_IS_E1(bp) ? 0 :
3040 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3041 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3042 }
3043 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3044 val = CHIP_IS_E1(bp) ? 0 :
3045 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3046 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3047 }
877e9aa4 3048 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3049 }
3050}
3051
72fd0718
VZ
3052#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3053#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3054#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3055#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3056#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3057#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
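/* Layout of BNX2X_MISC_GEN_REG as used by the helpers below: bits [15:0]
 * hold the global load counter (incremented on load, decremented on
 * unload) and bit 16 is the "reset in progress" flag; each helper
 * read-modify-writes only its own field.
 */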
f85582f8 3058
72fd0718
VZ
3059/*
3060 * should be run under rtnl lock
3061 */
3062static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3063{
3064 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3065 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3066 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3067 barrier();
3068 mmiowb();
3069}
3070
3071/*
3072 * should be run under rtnl lock
3073 */
3074static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3075{
3076 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 3077 val |= (1 << RESET_DONE_FLAG_SHIFT);
3078 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3079 barrier();
3080 mmiowb();
3081}
3082
3083/*
3084 * should be run under rtnl lock
3085 */
9f6c9258 3086bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3087{
3088 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3089 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3090 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3091}
3092
3093/*
3094 * should be run under rtnl lock
3095 */
9f6c9258 3096inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3097{
3098 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3099
3100 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3101
3102 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3103 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3104 barrier();
3105 mmiowb();
3106}
3107
3108/*
3109 * should be run under rtnl lock
3110 */
9f6c9258 3111u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3112{
3113 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3114
3115 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3116
3117 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3118 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3119 barrier();
3120 mmiowb();
3121
3122 return val1;
3123}
3124
3125/*
3126 * should be run under rtnl lock
3127 */
3128static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3129{
3130 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3131}
3132
3133static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3134{
3135 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3136 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3137}
3138
3139static inline void _print_next_block(int idx, const char *blk)
3140{
3141 if (idx)
3142 pr_cont(", ");
3143 pr_cont("%s", blk);
3144}
3145
3146static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3147{
3148 int i = 0;
3149 u32 cur_bit = 0;
3150 for (i = 0; sig; i++) {
3151 cur_bit = ((u32)0x1 << i);
3152 if (sig & cur_bit) {
3153 switch (cur_bit) {
3154 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3155 _print_next_block(par_num++, "BRB");
3156 break;
3157 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3158 _print_next_block(par_num++, "PARSER");
3159 break;
3160 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3161 _print_next_block(par_num++, "TSDM");
3162 break;
3163 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3164 _print_next_block(par_num++, "SEARCHER");
3165 break;
3166 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3167 _print_next_block(par_num++, "TSEMI");
3168 break;
3169 }
3170
3171 /* Clear the bit */
3172 sig &= ~cur_bit;
3173 }
3174 }
3175
3176 return par_num;
3177}
3178
3179static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3180{
3181 int i = 0;
3182 u32 cur_bit = 0;
3183 for (i = 0; sig; i++) {
3184 cur_bit = ((u32)0x1 << i);
3185 if (sig & cur_bit) {
3186 switch (cur_bit) {
3187 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3188 _print_next_block(par_num++, "PBCLIENT");
3189 break;
3190 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3191 _print_next_block(par_num++, "QM");
3192 break;
3193 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3194 _print_next_block(par_num++, "XSDM");
3195 break;
3196 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3197 _print_next_block(par_num++, "XSEMI");
3198 break;
3199 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3200 _print_next_block(par_num++, "DOORBELLQ");
3201 break;
3202 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3203 _print_next_block(par_num++, "VAUX PCI CORE");
3204 break;
3205 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3206 _print_next_block(par_num++, "DEBUG");
3207 break;
3208 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3209 _print_next_block(par_num++, "USDM");
3210 break;
3211 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3212 _print_next_block(par_num++, "USEMI");
3213 break;
3214 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3215 _print_next_block(par_num++, "UPB");
3216 break;
3217 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3218 _print_next_block(par_num++, "CSDM");
3219 break;
3220 }
3221
3222 /* Clear the bit */
3223 sig &= ~cur_bit;
3224 }
3225 }
3226
3227 return par_num;
3228}
3229
3230static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3231{
3232 int i = 0;
3233 u32 cur_bit = 0;
3234 for (i = 0; sig; i++) {
3235 cur_bit = ((u32)0x1 << i);
3236 if (sig & cur_bit) {
3237 switch (cur_bit) {
3238 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3239 _print_next_block(par_num++, "CSEMI");
3240 break;
3241 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3242 _print_next_block(par_num++, "PXP");
3243 break;
3244 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3245 _print_next_block(par_num++,
3246 "PXPPCICLOCKCLIENT");
3247 break;
3248 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3249 _print_next_block(par_num++, "CFC");
3250 break;
3251 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3252 _print_next_block(par_num++, "CDU");
3253 break;
3254 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3255 _print_next_block(par_num++, "IGU");
3256 break;
3257 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3258 _print_next_block(par_num++, "MISC");
3259 break;
3260 }
3261
3262 /* Clear the bit */
3263 sig &= ~cur_bit;
3264 }
3265 }
3266
3267 return par_num;
3268}
3269
3270static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3271{
3272 int i = 0;
3273 u32 cur_bit = 0;
3274 for (i = 0; sig; i++) {
3275 cur_bit = ((u32)0x1 << i);
3276 if (sig & cur_bit) {
3277 switch (cur_bit) {
3278 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3279 _print_next_block(par_num++, "MCP ROM");
3280 break;
3281 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3282 _print_next_block(par_num++, "MCP UMP RX");
3283 break;
3284 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3285 _print_next_block(par_num++, "MCP UMP TX");
3286 break;
3287 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3288 _print_next_block(par_num++, "MCP SCPAD");
3289 break;
3290 }
3291
3292 /* Clear the bit */
3293 sig &= ~cur_bit;
3294 }
3295 }
3296
3297 return par_num;
3298}
3299
3300static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3301 u32 sig2, u32 sig3)
3302{
3303 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3304 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3305 int par_num = 0;
3306 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3307 "[0]:0x%08x [1]:0x%08x "
3308 "[2]:0x%08x [3]:0x%08x\n",
3309 sig0 & HW_PRTY_ASSERT_SET_0,
3310 sig1 & HW_PRTY_ASSERT_SET_1,
3311 sig2 & HW_PRTY_ASSERT_SET_2,
3312 sig3 & HW_PRTY_ASSERT_SET_3);
3313 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3314 bp->dev->name);
3315 par_num = bnx2x_print_blocks_with_parity0(
3316 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3317 par_num = bnx2x_print_blocks_with_parity1(
3318 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3319 par_num = bnx2x_print_blocks_with_parity2(
3320 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3321 par_num = bnx2x_print_blocks_with_parity3(
3322 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3323 printk("\n");
3324 return true;
3325 } else
3326 return false;
3327}
3328
9f6c9258 3329bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3330{
a2fbb9ea 3331 struct attn_route attn;
72fd0718
VZ
3332 int port = BP_PORT(bp);
3333
3334 attn.sig[0] = REG_RD(bp,
3335 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3336 port*4);
3337 attn.sig[1] = REG_RD(bp,
3338 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3339 port*4);
3340 attn.sig[2] = REG_RD(bp,
3341 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3342 port*4);
3343 attn.sig[3] = REG_RD(bp,
3344 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3345 port*4);
3346
3347 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3348 attn.sig[3]);
3349}
3350
f2e0899f
DK
3351
3352static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3353{
3354 u32 val;
3355 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3356
3357 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3358 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3359 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3360 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3361 "ADDRESS_ERROR\n");
3362 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3363 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3364 "INCORRECT_RCV_BEHAVIOR\n");
3365 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3366 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3367 "WAS_ERROR_ATTN\n");
3368 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3369 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3370 "VF_LENGTH_VIOLATION_ATTN\n");
3371 if (val &
3372 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3373 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3374 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3375 if (val &
3376 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3377 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3378 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3379 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3380 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3381 "TCPL_ERROR_ATTN\n");
3382 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3383 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3384 "TCPL_IN_TWO_RCBS_ATTN\n");
3385 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3386 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3387 "CSSNOOP_FIFO_OVERFLOW\n");
3388 }
3389 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3390 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3391 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3392 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3393 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3394 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3395 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3396 "_ATC_TCPL_TO_NOT_PEND\n");
3397 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3398 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3399 "ATC_GPA_MULTIPLE_HITS\n");
3400 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3401 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3402 "ATC_RCPL_TO_EMPTY_CNT\n");
3403 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3405 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3406 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3407 "ATC_IREQ_LESS_THAN_STU\n");
3408 }
3409
3410 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3411 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3412 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3413 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3414 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3415 }
3416
3417}
3418
72fd0718
VZ
3419static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3420{
3421 struct attn_route attn, *group_mask;
34f80b04 3422 int port = BP_PORT(bp);
877e9aa4 3423 int index;
a2fbb9ea
ET
3424 u32 reg_addr;
3425 u32 val;
3fcaf2e5 3426 u32 aeu_mask;
a2fbb9ea
ET
3427
3428 /* need to take HW lock because MCP or other port might also
3429 try to handle this event */
4a37fb66 3430 bnx2x_acquire_alr(bp);
a2fbb9ea 3431
72fd0718
VZ
3432 if (bnx2x_chk_parity_attn(bp)) {
3433 bp->recovery_state = BNX2X_RECOVERY_INIT;
3434 bnx2x_set_reset_in_progress(bp);
3435 schedule_delayed_work(&bp->reset_task, 0);
3436 /* Disable HW interrupts */
3437 bnx2x_int_disable(bp);
3438 bnx2x_release_alr(bp);
3433 /* In case of parity errors don't handle attentions so that
3440 * the other function would also "see" the parity errors.
3441 */
3442 return;
3443 }
3444
a2fbb9ea
ET
3445 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3446 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3447 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3448 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
f2e0899f
DK
3449 if (CHIP_IS_E2(bp))
3450 attn.sig[4] =
3451 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3452 else
3453 attn.sig[4] = 0;
3454
3455 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3456 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
3457
3458 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3459 if (deasserted & (1 << index)) {
72fd0718 3460 group_mask = &bp->attn_group[index];
a2fbb9ea 3461
f2e0899f
DK
3462 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3463 "%08x %08x %08x\n",
3464 index,
3465 group_mask->sig[0], group_mask->sig[1],
3466 group_mask->sig[2], group_mask->sig[3],
3467 group_mask->sig[4]);
a2fbb9ea 3468
f2e0899f
DK
3469 bnx2x_attn_int_deasserted4(bp,
3470 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3471 bnx2x_attn_int_deasserted3(bp,
72fd0718 3472 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3473 bnx2x_attn_int_deasserted1(bp,
72fd0718 3474 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3475 bnx2x_attn_int_deasserted2(bp,
72fd0718 3476 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3477 bnx2x_attn_int_deasserted0(bp,
72fd0718 3478 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3479 }
3480 }
3481
4a37fb66 3482 bnx2x_release_alr(bp);
a2fbb9ea 3483
f2e0899f
DK
3484 if (bp->common.int_block == INT_BLOCK_HC)
3485 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3486 COMMAND_REG_ATTN_BITS_CLR);
3487 else
3488 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
3489
3490 val = ~deasserted;
f2e0899f
DK
3491 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3492 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3493 REG_WR(bp, reg_addr, val);
a2fbb9ea 3494
a2fbb9ea 3495 if (~bp->attn_state & deasserted)
3fcaf2e5 3496 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3497
3498 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3499 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3500
3fcaf2e5
EG
3501 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3502 aeu_mask = REG_RD(bp, reg_addr);
3503
3504 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3505 aeu_mask, deasserted);
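	/* re-enable the AEU attention lines (lower 10 bits) that we have
	 * just finished handling
	 */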
72fd0718 3506 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3507 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3508
3fcaf2e5
EG
3509 REG_WR(bp, reg_addr, aeu_mask);
3510 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3511
3512 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3513 bp->attn_state &= ~deasserted;
3514 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3515}
3516
3517static void bnx2x_attn_int(struct bnx2x *bp)
3518{
3519 /* read local copy of bits */
68d59484
EG
3520 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3521 attn_bits);
3522 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3523 attn_bits_ack);
a2fbb9ea
ET
3524 u32 attn_state = bp->attn_state;
3525
3526 /* look for changed bits */
3527 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3528 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3529
3530 DP(NETIF_MSG_HW,
3531 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3532 attn_bits, attn_ack, asserted, deasserted);
3533
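	/* Consistency check: wherever attn_bits and attn_ack agree, the
	 * driver copy must agree as well; a non-zero result below means
	 * attn_state has gone stale.
	 */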
3534 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3535 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3536
3537 /* handle bits that were raised */
3538 if (asserted)
3539 bnx2x_attn_int_asserted(bp, asserted);
3540
3541 if (deasserted)
3542 bnx2x_attn_int_deasserted(bp, deasserted);
3543}
3544
523224a3
DK
3545static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3546{
3547 /* No memory barriers */
3548 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3549 mmiowb(); /* keep prod updates ordered */
3550}
3551
3552#ifdef BCM_CNIC
3553static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3554 union event_ring_elem *elem)
3555{
3556 if (!bp->cnic_eth_dev.starting_cid ||
3557 cid < bp->cnic_eth_dev.starting_cid)
3558 return 1;
3559
3560 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3561
3562 if (unlikely(elem->message.data.cfc_del_event.error)) {
3563 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3564 cid);
3565 bnx2x_panic_dump(bp);
3566 }
3567 bnx2x_cnic_cfc_comp(bp, cid);
3568 return 0;
3569}
3570#endif
3571
3572static void bnx2x_eq_int(struct bnx2x *bp)
3573{
3574 u16 hw_cons, sw_cons, sw_prod;
3575 union event_ring_elem *elem;
3576 u32 cid;
3577 u8 opcode;
3578 int spqe_cnt = 0;
3579
3580 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3581
3582	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3583	 * When we reach the next-page element we need to adjust so that the
3584	 * loop condition below will be met. The next element is the size of a
3585	 * regular element and hence we increment by 1.
3586	 */
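	/* e.g. if EQ_DESC_MAX_PAGE is 255, a hw_cons of 255 would point at
	 * the next-page element rather than at a real EQE, so step over it
	 */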
3587 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3588 hw_cons++;
3589
3590	/* This function never runs in parallel with itself for a
3591	 * specific bp, thus there is no need for a "paired" read memory
3592	 * barrier here.
3593	 */
3594 sw_cons = bp->eq_cons;
3595 sw_prod = bp->eq_prod;
3596
3597 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
8fe23fbd 3598 hw_cons, sw_cons, atomic_read(&bp->spq_left));
523224a3
DK
3599
3600 for (; sw_cons != hw_cons;
3601 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3602
3603
3604 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3605
3606 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3607 opcode = elem->message.opcode;
3608
3609
3610 /* handle eq element */
3611 switch (opcode) {
3612 case EVENT_RING_OPCODE_STAT_QUERY:
3613 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3614 /* nothing to do with stats comp */
3615 continue;
3616
3617 case EVENT_RING_OPCODE_CFC_DEL:
3618 /* handle according to cid range */
3619 /*
3620 * we may want to verify here that the bp state is
3621 * HALTING
3622 */
3623 DP(NETIF_MSG_IFDOWN,
3624 "got delete ramrod for MULTI[%d]\n", cid);
3625#ifdef BCM_CNIC
3626 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3627 goto next_spqe;
3628#endif
3629 bnx2x_fp(bp, cid, state) =
3630 BNX2X_FP_STATE_CLOSED;
3631
3632 goto next_spqe;
3633 }
3634
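		/* the (opcode | bp->state) switch relies on the opcode values
		 * and the BNX2X_STATE_* values occupying disjoint bit ranges,
		 * so each combination forms a unique key
		 */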
3635 switch (opcode | bp->state) {
3636 case (EVENT_RING_OPCODE_FUNCTION_START |
3637 BNX2X_STATE_OPENING_WAIT4_PORT):
3638 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3639 bp->state = BNX2X_STATE_FUNC_STARTED;
3640 break;
3641
3642 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3643 BNX2X_STATE_CLOSING_WAIT4_HALT):
3644 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3645 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3646 break;
3647
3648 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3649 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3650 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3651 bp->set_mac_pending = 0;
3652 break;
3653
3654 case (EVENT_RING_OPCODE_SET_MAC |
3655 BNX2X_STATE_CLOSING_WAIT4_HALT):
3656 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3657 bp->set_mac_pending = 0;
3658 break;
3659 default:
3660			/* unknown event; log the error and continue */
3661 BNX2X_ERR("Unknown EQ event %d\n",
3662 elem->message.opcode);
3663 }
3664next_spqe:
3665 spqe_cnt++;
3666 } /* for */
3667
8fe23fbd
DK
3668 smp_mb__before_atomic_inc();
3669 atomic_add(spqe_cnt, &bp->spq_left);
523224a3
DK
3670
3671 bp->eq_cons = sw_cons;
3672 bp->eq_prod = sw_prod;
3673	/* Make sure that the above memory writes have been issued before
	 * updating the producer */
3674 smp_wmb();
3675
3676 /* update producer */
3677 bnx2x_update_eq_prod(bp, bp->eq_prod);
3678}
3679
a2fbb9ea
ET
3680static void bnx2x_sp_task(struct work_struct *work)
3681{
1cf167f2 3682 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3683 u16 status;
3684
3685 /* Return here if interrupt is disabled */
3686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3687 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3688 return;
3689 }
3690
3691 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3692/* if (status == 0) */
3693/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3694
cdaa7cb8 3695 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3696
877e9aa4 3697 /* HW attentions */
523224a3 3698 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3699 bnx2x_attn_int(bp);
523224a3 3700 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3701 }
3702
523224a3
DK
3703 /* SP events: STAT_QUERY and others */
3704 if (status & BNX2X_DEF_SB_IDX) {
3705
3706 /* Handle EQ completions */
3707 bnx2x_eq_int(bp);
3708
3709 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3710 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3711
3712 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3713 }
3714
3715 if (unlikely(status))
3716 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3717 status);
a2fbb9ea 3718
523224a3
DK
3719 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3720 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3721}
3722
9f6c9258 3723irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3724{
3725 struct net_device *dev = dev_instance;
3726 struct bnx2x *bp = netdev_priv(dev);
3727
3728 /* Return here if interrupt is disabled */
3729 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3730 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3731 return IRQ_HANDLED;
3732 }
3733
523224a3
DK
3734 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3735 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3736
3737#ifdef BNX2X_STOP_ON_ERROR
3738 if (unlikely(bp->panic))
3739 return IRQ_HANDLED;
3740#endif
3741
993ac7b5
MC
3742#ifdef BCM_CNIC
3743 {
3744 struct cnic_ops *c_ops;
3745
3746 rcu_read_lock();
3747 c_ops = rcu_dereference(bp->cnic_ops);
3748 if (c_ops)
3749 c_ops->cnic_handler(bp->cnic_data, NULL);
3750 rcu_read_unlock();
3751 }
3752#endif
1cf167f2 3753 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3754
3755 return IRQ_HANDLED;
3756}
3757
3758/* end of slow path */
3759
a2fbb9ea
ET
3760static void bnx2x_timer(unsigned long data)
3761{
3762 struct bnx2x *bp = (struct bnx2x *) data;
3763
3764 if (!netif_running(bp->dev))
3765 return;
3766
3767 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3768 goto timer_restart;
a2fbb9ea
ET
3769
3770 if (poll) {
3771 struct bnx2x_fastpath *fp = &bp->fp[0];
3772 int rc;
3773
7961f791 3774 bnx2x_tx_int(fp);
a2fbb9ea
ET
3775 rc = bnx2x_rx_int(fp, 1000);
3776 }
3777
34f80b04 3778 if (!BP_NOMCP(bp)) {
f2e0899f 3779 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
3780 u32 drv_pulse;
3781 u32 mcp_pulse;
3782
3783 ++bp->fw_drv_pulse_wr_seq;
3784 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3785 /* TBD - add SYSTEM_TIME */
3786 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3787 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3788
f2e0899f 3789 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
3790 MCP_PULSE_SEQ_MASK);
3791 /* The delta between driver pulse and mcp response
3792 * should be 1 (before mcp response) or 0 (after mcp response)
3793 */
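		/* e.g. drv_pulse 0x0005 is fine against mcp_pulse 0x0005 or
		 * 0x0004; the masked +1 also keeps the sequence wrap-around
		 * case legal
		 */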
3794 if ((drv_pulse != mcp_pulse) &&
3795 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3796 /* someone lost a heartbeat... */
3797 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3798 drv_pulse, mcp_pulse);
3799 }
3800 }
3801
f34d28ea 3802 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3803 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3804
f1410647 3805timer_restart:
a2fbb9ea
ET
3806 mod_timer(&bp->timer, jiffies + bp->current_interval);
3807}
3808
3809/* end of Statistics */
3810
3811/* nic init */
3812
3813/*
3814 * nic init service functions
3815 */
3816
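/* fill 'len' bytes at BAR offset 'addr' with the pattern 'fill': dword
 * writes when both addr and len are 4-byte aligned, byte writes (of the
 * low byte of 'fill') otherwise
 */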
523224a3 3817static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3818{
523224a3
DK
3819 u32 i;
3820 if (!(len%4) && !(addr%4))
3821 for (i = 0; i < len; i += 4)
3822 REG_WR(bp, addr + i, fill);
3823 else
3824 for (i = 0; i < len; i++)
3825 REG_WR8(bp, addr + i, fill);
34f80b04 3826
34f80b04
EG
3827}
3828
523224a3
DK
3829/* helper: writes FP SP data to FW - data_size in dwords */
3830static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3831 int fw_sb_id,
3832 u32 *sb_data_p,
3833 u32 data_size)
34f80b04 3834{
a2fbb9ea 3835 int index;
523224a3
DK
3836 for (index = 0; index < data_size; index++)
3837 REG_WR(bp, BAR_CSTRORM_INTMEM +
3838 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3839 sizeof(u32)*index,
3840 *(sb_data_p + index));
3841}
a2fbb9ea 3842
523224a3
DK
3843static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3844{
3845 u32 *sb_data_p;
3846 u32 data_size = 0;
f2e0899f 3847 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3848 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3849
523224a3 3850 /* disable the function first */
f2e0899f
DK
3851 if (CHIP_IS_E2(bp)) {
3852 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3853 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3854 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3855 sb_data_e2.common.p_func.vf_valid = false;
3856 sb_data_p = (u32 *)&sb_data_e2;
3857 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3858 } else {
3859 memset(&sb_data_e1x, 0,
3860 sizeof(struct hc_status_block_data_e1x));
3861 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3862 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3863 sb_data_e1x.common.p_func.vf_valid = false;
3864 sb_data_p = (u32 *)&sb_data_e1x;
3865 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3866 }
523224a3 3867 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3868
523224a3
DK
3869 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3870 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3871 CSTORM_STATUS_BLOCK_SIZE);
3872 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3873 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3874 CSTORM_SYNC_BLOCK_SIZE);
3875}
34f80b04 3876
523224a3
DK
3877/* helper: writes SP SB data to FW */
3878static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3879 struct hc_sp_status_block_data *sp_sb_data)
3880{
3881 int func = BP_FUNC(bp);
3882 int i;
3883 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3884 REG_WR(bp, BAR_CSTRORM_INTMEM +
3885 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3886 i*sizeof(u32),
3887 *((u32 *)sp_sb_data + i));
34f80b04
EG
3888}
3889
523224a3 3890static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
3891{
3892 int func = BP_FUNC(bp);
523224a3
DK
3893 struct hc_sp_status_block_data sp_sb_data;
3894 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 3895
523224a3
DK
3896 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3897 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3898 sp_sb_data.p_func.vf_valid = false;
3899
3900 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3901
3902 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3903 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3904 CSTORM_SP_STATUS_BLOCK_SIZE);
3905 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3906 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3907 CSTORM_SP_SYNC_BLOCK_SIZE);
3908
3909}
3910
3911
3912static inline
3913void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3914 int igu_sb_id, int igu_seg_id)
3915{
3916 hc_sm->igu_sb_id = igu_sb_id;
3917 hc_sm->igu_seg_id = igu_seg_id;
3918 hc_sm->timer_value = 0xFF;
3919 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
3920}
3921
523224a3
DK
3922void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3923 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 3924{
523224a3
DK
3925 int igu_seg_id;
3926
f2e0899f 3927 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
3928 struct hc_status_block_data_e1x sb_data_e1x;
3929 struct hc_status_block_sm *hc_sm_p;
3930 struct hc_index_data *hc_index_p;
3931 int data_size;
3932 u32 *sb_data_p;
3933
f2e0899f
DK
3934 if (CHIP_INT_MODE_IS_BC(bp))
3935 igu_seg_id = HC_SEG_ACCESS_NORM;
3936 else
3937 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
3938
3939 bnx2x_zero_fp_sb(bp, fw_sb_id);
3940
f2e0899f
DK
3941 if (CHIP_IS_E2(bp)) {
3942 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3943 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3944 sb_data_e2.common.p_func.vf_id = vfid;
3945 sb_data_e2.common.p_func.vf_valid = vf_valid;
3946 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3947 sb_data_e2.common.same_igu_sb_1b = true;
3948 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3949 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3950 hc_sm_p = sb_data_e2.common.state_machine;
3951 hc_index_p = sb_data_e2.index_data;
3952 sb_data_p = (u32 *)&sb_data_e2;
3953 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3954 } else {
3955 memset(&sb_data_e1x, 0,
3956 sizeof(struct hc_status_block_data_e1x));
3957 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3958 sb_data_e1x.common.p_func.vf_id = 0xff;
3959 sb_data_e1x.common.p_func.vf_valid = false;
3960 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3961 sb_data_e1x.common.same_igu_sb_1b = true;
3962 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3963 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3964 hc_sm_p = sb_data_e1x.common.state_machine;
3965 hc_index_p = sb_data_e1x.index_data;
3966 sb_data_p = (u32 *)&sb_data_e1x;
3967 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3968 }
523224a3
DK
3969
3970 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3971 igu_sb_id, igu_seg_id);
3972 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3973 igu_sb_id, igu_seg_id);
3974
3975 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3976
3977	/* write indices to HW */
3978 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3979}
3980
3981static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3982 u8 sb_index, u8 disable, u16 usec)
3983{
3984 int port = BP_PORT(bp);
3985 u8 ticks = usec / BNX2X_BTR;
3986
3987 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3988
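	/* an explicit disable request wins; otherwise a zero timeout also
	 * disables coalescing for this index
	 */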
3989 disable = disable ? 1 : (usec ? 0 : 1);
3990 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3991}
3992
3993static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3994 u16 tx_usec, u16 rx_usec)
3995{
3996 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3997 false, rx_usec);
3998 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3999 false, tx_usec);
4000}
f2e0899f 4001
523224a3
DK
4002static void bnx2x_init_def_sb(struct bnx2x *bp)
4003{
4004 struct host_sp_status_block *def_sb = bp->def_status_blk;
4005 dma_addr_t mapping = bp->def_status_blk_mapping;
4006 int igu_sp_sb_index;
4007 int igu_seg_id;
34f80b04
EG
4008 int port = BP_PORT(bp);
4009 int func = BP_FUNC(bp);
523224a3 4010 int reg_offset;
a2fbb9ea 4011 u64 section;
523224a3
DK
4012 int index;
4013 struct hc_sp_status_block_data sp_sb_data;
4014 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4015
f2e0899f
DK
4016 if (CHIP_INT_MODE_IS_BC(bp)) {
4017 igu_sp_sb_index = DEF_SB_IGU_ID;
4018 igu_seg_id = HC_SEG_ACCESS_DEF;
4019 } else {
4020 igu_sp_sb_index = bp->igu_dsb_id;
4021 igu_seg_id = IGU_SEG_ACCESS_DEF;
4022 }
a2fbb9ea
ET
4023
4024 /* ATTN */
523224a3 4025 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4026 atten_status_block);
523224a3 4027 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4028
49d66772
ET
4029 bp->attn_state = 0;
4030
a2fbb9ea
ET
4031 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4032 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4033 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
4034 int sindex;
4035		/* take care of sig[0]..sig[3]; sig[4] is handled below */
4036 for (sindex = 0; sindex < 4; sindex++)
4037 bp->attn_group[index].sig[sindex] =
4038 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f
DK
4039
4040 if (CHIP_IS_E2(bp))
4041 /*
4042 * enable5 is separate from the rest of the registers,
4043 * and therefore the address skip is 4
4044 * and not 16 between the different groups
4045 */
4046 bp->attn_group[index].sig[4] = REG_RD(bp,
4047 reg_offset + 0x10 + 0x4*index);
4048 else
4049 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
4050 }
4051
f2e0899f
DK
4052 if (bp->common.int_block == INT_BLOCK_HC) {
4053 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4054 HC_REG_ATTN_MSG0_ADDR_L);
4055
4056 REG_WR(bp, reg_offset, U64_LO(section));
4057 REG_WR(bp, reg_offset + 4, U64_HI(section));
4058 } else if (CHIP_IS_E2(bp)) {
4059 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4060 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4061 }
a2fbb9ea 4062
523224a3
DK
4063 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4064 sp_sb);
a2fbb9ea 4065
523224a3 4066 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4067
523224a3
DK
4068 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4069 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4070 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4071 sp_sb_data.igu_seg_id = igu_seg_id;
4072 sp_sb_data.p_func.pf_id = func;
f2e0899f 4073 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4074 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4075
523224a3 4076 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4077
bb2a0f7a 4078 bp->stats_pending = 0;
66e855f3 4079 bp->set_mac_pending = 0;
bb2a0f7a 4080
523224a3 4081 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4082}
4083
9f6c9258 4084void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4085{
a2fbb9ea
ET
4086 int i;
4087
523224a3
DK
4088 for_each_queue(bp, i)
4089 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4090 bp->rx_ticks, bp->tx_ticks);
a2fbb9ea
ET
4091}
4092
a2fbb9ea
ET
4093static void bnx2x_init_sp_ring(struct bnx2x *bp)
4094{
a2fbb9ea 4095 spin_lock_init(&bp->spq_lock);
8fe23fbd 4096 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4097
a2fbb9ea 4098 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4099 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4100 bp->spq_prod_bd = bp->spq;
4101 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
4102}
4103
523224a3 4104static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
4105{
4106 int i;
523224a3
DK
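	/* chain the pages: the last element of each page is a next-page
	 * pointer, and (i % NUM_EQ_PAGES) wraps the last page back to the
	 * first, making the event ring circular
	 */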
4107 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4108 union event_ring_elem *elem =
4109 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4110
523224a3
DK
4111 elem->next_page.addr.hi =
4112 cpu_to_le32(U64_HI(bp->eq_mapping +
4113 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4114 elem->next_page.addr.lo =
4115 cpu_to_le32(U64_LO(bp->eq_mapping +
4116 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4117 }
523224a3
DK
4118 bp->eq_cons = 0;
4119 bp->eq_prod = NUM_EQ_DESC;
4120 bp->eq_cons_sb = BNX2X_EQ_INDEX;
a2fbb9ea
ET
4121}
4122
4123static void bnx2x_init_ind_table(struct bnx2x *bp)
4124{
26c8fa4d 4125 int func = BP_FUNC(bp);
a2fbb9ea
ET
4126 int i;
4127
555f6c78 4128 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4129 return;
4130
555f6c78
EG
4131 DP(NETIF_MSG_IFUP,
4132 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4133 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4134 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4135 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 4136 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
4137}
4138
9f6c9258 4139void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4140{
34f80b04 4141 int mode = bp->rx_mode;
523224a3
DK
4142 u16 cl_id;
4143
581ce43d
EG
4144 /* All but management unicast packets should pass to the host as well */
4145 u32 llh_mask =
4146 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4147 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4148 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4149 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4150
a2fbb9ea
ET
4151 switch (mode) {
4152 case BNX2X_RX_MODE_NONE: /* no Rx */
523224a3
DK
4153 cl_id = BP_L_ID(bp);
4154 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
a2fbb9ea 4155 break;
356e2385 4156
a2fbb9ea 4157 case BNX2X_RX_MODE_NORMAL:
523224a3
DK
4158 cl_id = BP_L_ID(bp);
4159 bnx2x_rxq_set_mac_filters(bp, cl_id,
4160 BNX2X_ACCEPT_UNICAST |
4161 BNX2X_ACCEPT_BROADCAST |
4162 BNX2X_ACCEPT_MULTICAST);
a2fbb9ea 4163 break;
356e2385 4164
a2fbb9ea 4165 case BNX2X_RX_MODE_ALLMULTI:
523224a3
DK
4166 cl_id = BP_L_ID(bp);
4167 bnx2x_rxq_set_mac_filters(bp, cl_id,
4168 BNX2X_ACCEPT_UNICAST |
4169 BNX2X_ACCEPT_BROADCAST |
4170 BNX2X_ACCEPT_ALL_MULTICAST);
a2fbb9ea 4171 break;
356e2385 4172
a2fbb9ea 4173 case BNX2X_RX_MODE_PROMISC:
523224a3
DK
4174 cl_id = BP_L_ID(bp);
4175 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4176
581ce43d
EG
4177 /* pass management unicast packets as well */
4178 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4179 break;
356e2385 4180
a2fbb9ea 4181 default:
34f80b04
EG
4182 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4183 break;
a2fbb9ea
ET
4184 }
4185
581ce43d 4186 REG_WR(bp,
523224a3
DK
4187 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4188 NIG_REG_LLH0_BRB1_DRV_MASK,
581ce43d
EG
4189 llh_mask);
4190
523224a3
DK
4191 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4192 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4193 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4194 bp->mac_filters.ucast_drop_all,
4195 bp->mac_filters.mcast_drop_all,
4196 bp->mac_filters.bcast_drop_all,
4197 bp->mac_filters.ucast_accept_all,
4198 bp->mac_filters.mcast_accept_all,
4199 bp->mac_filters.bcast_accept_all
4200 );
a2fbb9ea 4201
523224a3 4202 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
a2fbb9ea
ET
4203}
4204
471de716
EG
4205static void bnx2x_init_internal_common(struct bnx2x *bp)
4206{
4207 int i;
4208
523224a3 4209 if (!CHIP_IS_E1(bp)) {
de832a55 4210
523224a3
DK
4211		/* xstorm needs to know whether to add ovlan to packets or not,
4212		 * in switch-independent mode we'll write 0 here... */
34f80b04 4213 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4214 bp->mf_mode);
34f80b04 4215 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4216 bp->mf_mode);
34f80b04 4217 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4218 bp->mf_mode);
34f80b04 4219 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4220 bp->mf_mode);
34f80b04
EG
4221 }
4222
523224a3
DK
4223 /* Zero this manually as its initialization is
4224 currently missing in the initTool */
4225 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4226 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4227 USTORM_AGG_DATA_OFFSET + i * 4, 0);
f2e0899f
DK
4228 if (CHIP_IS_E2(bp)) {
4229 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4230 CHIP_INT_MODE_IS_BC(bp) ?
4231 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4232 }
523224a3 4233}
8a1c38d1 4234
523224a3
DK
4235static void bnx2x_init_internal_port(struct bnx2x *bp)
4236{
4237 /* port */
a2fbb9ea
ET
4238}
4239
471de716
EG
4240static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4241{
4242 switch (load_code) {
4243 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4244 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
4245 bnx2x_init_internal_common(bp);
4246 /* no break */
4247
4248 case FW_MSG_CODE_DRV_LOAD_PORT:
4249 bnx2x_init_internal_port(bp);
4250 /* no break */
4251
4252 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
4253 /* internal memory per function is
4254 initialized inside bnx2x_pf_init */
471de716
EG
4255 break;
4256
4257 default:
4258 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4259 break;
4260 }
4261}
4262
523224a3
DK
4263static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4264{
4265 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4266
4267 fp->state = BNX2X_FP_STATE_CLOSED;
4268
4269 fp->index = fp->cid = fp_idx;
4270 fp->cl_id = BP_L_ID(bp) + fp_idx;
4271 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4272 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4273	/* qZone id equals the FW (per path) client id */
4274 fp->cl_qzone_id = fp->cl_id +
f2e0899f
DK
4275 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4276 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4277 /* init shortcut */
f2e0899f
DK
4278 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4279 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
523224a3
DK
4280 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4281	/* Setup SB indices */
4282 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4283 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4284
4285 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4286 "cl_id %d fw_sb %d igu_sb %d\n",
4287 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4288 fp->igu_sb_id);
4289 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4290 fp->fw_sb_id, fp->igu_sb_id);
4291
4292 bnx2x_update_fpsb_idx(fp);
4293}
4294
9f6c9258 4295void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4296{
4297 int i;
4298
523224a3
DK
4299 for_each_queue(bp, i)
4300 bnx2x_init_fp_sb(bp, i);
37b091ba 4301#ifdef BCM_CNIC
523224a3
DK
4302
4303 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4304 BNX2X_VF_ID_INVALID, false,
4305 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4306
37b091ba 4307#endif
a2fbb9ea 4308
16119785
EG
4309 /* ensure status block indices were read */
4310 rmb();
4311
523224a3 4312 bnx2x_init_def_sb(bp);
5c862848 4313 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4314 bnx2x_init_rx_rings(bp);
523224a3 4315 bnx2x_init_tx_rings(bp);
a2fbb9ea 4316 bnx2x_init_sp_ring(bp);
523224a3 4317 bnx2x_init_eq_ring(bp);
471de716 4318 bnx2x_init_internal(bp, load_code);
523224a3 4319 bnx2x_pf_init(bp);
a2fbb9ea 4320 bnx2x_init_ind_table(bp);
0ef00459
EG
4321 bnx2x_stats_init(bp);
4322
4323 /* At this point, we are ready for interrupts */
4324 atomic_set(&bp->intr_sem, 0);
4325
4326 /* flush all before enabling interrupts */
4327 mb();
4328 mmiowb();
4329
615f8fd9 4330 bnx2x_int_enable(bp);
eb8da205
EG
4331
4332 /* Check for SPIO5 */
4333 bnx2x_attn_int_deasserted0(bp,
4334 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4335 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4336}
4337
4338/* end of nic init */
4339
4340/*
4341 * gzip service functions
4342 */
4343
4344static int bnx2x_gunzip_init(struct bnx2x *bp)
4345{
1a983142
FT
4346 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4347 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4348 if (bp->gunzip_buf == NULL)
4349 goto gunzip_nomem1;
4350
4351 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4352 if (bp->strm == NULL)
4353 goto gunzip_nomem2;
4354
4355 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4356 GFP_KERNEL);
4357 if (bp->strm->workspace == NULL)
4358 goto gunzip_nomem3;
4359
4360 return 0;
4361
4362gunzip_nomem3:
4363 kfree(bp->strm);
4364 bp->strm = NULL;
4365
4366gunzip_nomem2:
1a983142
FT
4367 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4368 bp->gunzip_mapping);
a2fbb9ea
ET
4369 bp->gunzip_buf = NULL;
4370
4371gunzip_nomem1:
cdaa7cb8
VZ
4372	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4373		   " decompression\n");
a2fbb9ea
ET
4374 return -ENOMEM;
4375}
4376
4377static void bnx2x_gunzip_end(struct bnx2x *bp)
4378{
4379 kfree(bp->strm->workspace);
a2fbb9ea
ET
4380 kfree(bp->strm);
4381 bp->strm = NULL;
4382
4383 if (bp->gunzip_buf) {
1a983142
FT
4384 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4385 bp->gunzip_mapping);
a2fbb9ea
ET
4386 bp->gunzip_buf = NULL;
4387 }
4388}
4389
94a78b79 4390static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4391{
4392 int n, rc;
4393
4394 /* check gzip header */
94a78b79
VZ
4395 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4396 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4397 return -EINVAL;
94a78b79 4398 }
a2fbb9ea
ET
4399
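	/* a gzip stream starts with a 10-byte fixed header (magic 0x1f 0x8b,
	 * method, flags, mtime, xfl, os); if the FNAME flag is set, a
	 * NUL-terminated file name follows and is skipped below
	 */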
4400 n = 10;
4401
34f80b04 4402#define FNAME 0x8
a2fbb9ea
ET
4403
4404 if (zbuf[3] & FNAME)
4405 while ((zbuf[n++] != 0) && (n < len));
4406
94a78b79 4407 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4408 bp->strm->avail_in = len - n;
4409 bp->strm->next_out = bp->gunzip_buf;
4410 bp->strm->avail_out = FW_BUF_SIZE;
4411
4412 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4413 if (rc != Z_OK)
4414 return rc;
4415
4416 rc = zlib_inflate(bp->strm, Z_FINISH);
4417 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4418 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4419 bp->strm->msg);
a2fbb9ea
ET
4420
4421 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4422 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4423 netdev_err(bp->dev, "Firmware decompression error:"
4424 " gunzip_outlen (%d) not aligned\n",
4425 bp->gunzip_outlen);
a2fbb9ea
ET
4426 bp->gunzip_outlen >>= 2;
4427
4428 zlib_inflateEnd(bp->strm);
4429
4430 if (rc == Z_STREAM_END)
4431 return 0;
4432
4433 return rc;
4434}
4435
4436/* nic load/unload */
4437
4438/*
34f80b04 4439 * General service functions
a2fbb9ea
ET
4440 */
4441
4442/* send a NIG loopback debug packet */
4443static void bnx2x_lb_pckt(struct bnx2x *bp)
4444{
a2fbb9ea 4445 u32 wb_write[3];
a2fbb9ea
ET
4446
4447 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4448 wb_write[0] = 0x55555555;
4449 wb_write[1] = 0x55555555;
34f80b04 4450 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4451 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4452
4453 /* NON-IP protocol */
a2fbb9ea
ET
4454 wb_write[0] = 0x09000000;
4455 wb_write[1] = 0x55555555;
34f80b04 4456 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4457 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4458}
4459
4460/* some of the internal memories
4461 * are not directly readable from the driver;
4462 * to test them we send debug packets
4463 */
4464static int bnx2x_int_mem_test(struct bnx2x *bp)
4465{
4466 int factor;
4467 int count, i;
4468 u32 val = 0;
4469
ad8d3948 4470 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4471 factor = 120;
ad8d3948
EG
4472 else if (CHIP_REV_IS_EMUL(bp))
4473 factor = 200;
4474 else
a2fbb9ea 4475 factor = 1;
a2fbb9ea 4476
a2fbb9ea
ET
4477 /* Disable inputs of parser neighbor blocks */
4478 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4479 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4480 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4481 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4482
4483 /* Write 0 to parser credits for CFC search request */
4484 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4485
4486 /* send Ethernet packet */
4487 bnx2x_lb_pckt(bp);
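	/* the loopback packet is pushed as two 8-byte data beats, matching
	 * the 0x10-byte size expected below
	 */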
4488
4489 /* TODO do i reset NIG statistic? */
4490 /* Wait until NIG register shows 1 packet of size 0x10 */
4491 count = 1000 * factor;
4492 while (count) {
34f80b04 4493
a2fbb9ea
ET
4494 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4495 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4496 if (val == 0x10)
4497 break;
4498
4499 msleep(10);
4500 count--;
4501 }
4502 if (val != 0x10) {
4503 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4504 return -1;
4505 }
4506
4507 /* Wait until PRS register shows 1 packet */
4508 count = 1000 * factor;
4509 while (count) {
4510 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4511 if (val == 1)
4512 break;
4513
4514 msleep(10);
4515 count--;
4516 }
4517 if (val != 0x1) {
4518 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4519 return -2;
4520 }
4521
4522 /* Reset and init BRB, PRS */
34f80b04 4523 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4524 msleep(50);
34f80b04 4525 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4526 msleep(50);
94a78b79
VZ
4527 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4528 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
4529
4530 DP(NETIF_MSG_HW, "part2\n");
4531
4532 /* Disable inputs of parser neighbor blocks */
4533 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4534 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4535 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4536 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4537
4538 /* Write 0 to parser credits for CFC search request */
4539 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4540
4541 /* send 10 Ethernet packets */
4542 for (i = 0; i < 10; i++)
4543 bnx2x_lb_pckt(bp);
4544
4545	/* Wait until NIG register shows 10 + 1
4546	   packets with a total size of 11*0x10 = 0xb0 bytes */
4547 count = 1000 * factor;
4548 while (count) {
34f80b04 4549
a2fbb9ea
ET
4550 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4551 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4552 if (val == 0xb0)
4553 break;
4554
4555 msleep(10);
4556 count--;
4557 }
4558 if (val != 0xb0) {
4559 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4560 return -3;
4561 }
4562
4563 /* Wait until PRS register shows 2 packets */
4564 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4565 if (val != 2)
4566 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4567
4568 /* Write 1 to parser credits for CFC search request */
4569 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4570
4571 /* Wait until PRS register shows 3 packets */
4572 msleep(10 * factor);
4573	/* and check that the PRS register indeed shows 3 packets */
4574 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4575 if (val != 3)
4576 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4577
4578 /* clear NIG EOP FIFO */
4579 for (i = 0; i < 11; i++)
4580 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4581 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4582 if (val != 1) {
4583 BNX2X_ERR("clear of NIG failed\n");
4584 return -4;
4585 }
4586
4587 /* Reset and init BRB, PRS, NIG */
4588 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4589 msleep(50);
4590 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4591 msleep(50);
94a78b79
VZ
4592 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4593 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4594#ifndef BCM_CNIC
a2fbb9ea
ET
4595 /* set NIC mode */
4596 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4597#endif
4598
4599 /* Enable inputs of parser neighbor blocks */
4600 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4601 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4602 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4603 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
4604
4605 DP(NETIF_MSG_HW, "done\n");
4606
4607 return 0; /* OK */
4608}
4609
4610static void enable_blocks_attention(struct bnx2x *bp)
4611{
4612 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
f2e0899f
DK
4613 if (CHIP_IS_E2(bp))
4614 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4615 else
4616 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
4617 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
4619	/*
4620	 * mask read length error interrupts in brb for parser
4621	 * (parsing unit and 'checksum and crc' unit);
4622	 * these errors are legal (the PU reads a fixed length and the CAC
4623	 * can cause a read length error on truncated packets)
4624	 */
4625 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
4626 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4627 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4628 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4629 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4630 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4631/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4632/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4633 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4634 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4635 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4636/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4637/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4638 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4639 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4640 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4641 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4642/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4643/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4644
34f80b04
EG
4645 if (CHIP_REV_IS_FPGA(bp))
4646 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
f2e0899f
DK
4647 else if (CHIP_IS_E2(bp))
4648 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4649 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4650 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4651 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4652 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4653 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
34f80b04
EG
4654 else
4655 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4656 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4657 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4658 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4659/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4660/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4661 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4662 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
4663/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4664 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
4665}
4666
72fd0718
VZ
4667static const struct {
4668 u32 addr;
4669 u32 mask;
4670} bnx2x_parity_mask[] = {
f2e0899f
DK
4671 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4672 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4673 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4674 {HC_REG_HC_PRTY_MASK, 0x7},
4675 {MISC_REG_MISC_PRTY_MASK, 0x1},
f85582f8
DK
4676 {QM_REG_QM_PRTY_MASK, 0x0},
4677 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
72fd0718
VZ
4678 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4679 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
f85582f8
DK
4680 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4681 {CDU_REG_CDU_PRTY_MASK, 0x0},
4682 {CFC_REG_CFC_PRTY_MASK, 0x0},
4683 {DBG_REG_DBG_PRTY_MASK, 0x0},
4684 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4685 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4686 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4687 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4688 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4689 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4690 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4691 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4692 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4693 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4694 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4695 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4696 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4697 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4698 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
72fd0718
VZ
4699};
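/* a set bit in a PRTY_MASK register masks that parity source; a mask of
 * 0x0 leaves every parity source of the block enabled
 */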
4700
4701static void enable_blocks_parity(struct bnx2x *bp)
4702{
cbd9da7b 4703 int i;
72fd0718 4704
cbd9da7b 4705 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
72fd0718
VZ
4706 REG_WR(bp, bnx2x_parity_mask[i].addr,
4707 bnx2x_parity_mask[i].mask);
4708}
4709
34f80b04 4710
81f75bbf
EG
4711static void bnx2x_reset_common(struct bnx2x *bp)
4712{
4713 /* reset_common */
4714 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4715 0xd3ffff7f);
4716 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4717}
4718
573f2035
EG
4719static void bnx2x_init_pxp(struct bnx2x *bp)
4720{
4721 u16 devctl;
4722 int r_order, w_order;
4723
4724 pci_read_config_word(bp->pdev,
4725 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4726 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
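	/* in the PCIe Device Control register, bits 7:5 encode the max
	 * payload size and bits 14:12 the max read request size, each as
	 * 128 << value bytes, hence the shifts by 5 and 12 below
	 */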
4727 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4728 if (bp->mrrs == -1)
4729 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4730 else {
4731 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4732 r_order = bp->mrrs;
4733 }
4734
4735 bnx2x_init_pxp_arb(bp, r_order, w_order);
4736}
fd4ef40d
EG
4737
4738static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4739{
2145a920 4740 int is_required;
fd4ef40d 4741 u32 val;
2145a920 4742 int port;
fd4ef40d 4743
2145a920
VZ
4744 if (BP_NOMCP(bp))
4745 return;
4746
4747 is_required = 0;
fd4ef40d
EG
4748 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4749 SHARED_HW_CFG_FAN_FAILURE_MASK;
4750
4751 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4752 is_required = 1;
4753
4754 /*
4755 * The fan failure mechanism is usually related to the PHY type since
4756 * the power consumption of the board is affected by the PHY. Currently,
4757 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4758 */
4759 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4760 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4761 is_required |=
d90d96ba
YR
4762 bnx2x_fan_failure_det_req(
4763 bp,
4764 bp->common.shmem_base,
a22f0788 4765 bp->common.shmem2_base,
d90d96ba 4766 port);
fd4ef40d
EG
4767 }
4768
4769 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4770
4771 if (is_required == 0)
4772 return;
4773
4774 /* Fan failure is indicated by SPIO 5 */
4775 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4776 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4777
4778 /* set to active low mode */
4779 val = REG_RD(bp, MISC_REG_SPIO_INT);
4780 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4781 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
4782 REG_WR(bp, MISC_REG_SPIO_INT, val);
4783
4784 /* enable interrupt to signal the IGU */
4785 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4786 val |= (1 << MISC_REGISTERS_SPIO_5);
4787 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4788}
4789
f2e0899f
DK
4790static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4791{
4792 u32 offset = 0;
4793
4794 if (CHIP_IS_E1(bp))
4795 return;
4796 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4797 return;
4798
4799 switch (BP_ABS_FUNC(bp)) {
4800 case 0:
4801 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4802 break;
4803 case 1:
4804 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4805 break;
4806 case 2:
4807 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4808 break;
4809 case 3:
4810 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4811 break;
4812 case 4:
4813 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4814 break;
4815 case 5:
4816 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4817 break;
4818 case 6:
4819 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4820 break;
4821 case 7:
4822 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4823 break;
4824 default:
4825 return;
4826 }
4827
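	/* writing the pretend register makes subsequent GRC accesses by
	 * this function execute on behalf of pretend_func_num; the
	 * read-back flushes the write before dependent accesses
	 */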
4828 REG_WR(bp, offset, pretend_func_num);
4829 REG_RD(bp, offset);
4830 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4831}
4832
4833static void bnx2x_pf_disable(struct bnx2x *bp)
4834{
4835 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4836 val &= ~IGU_PF_CONF_FUNC_EN;
4837
4838 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4839 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4840 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4841}
4842
523224a3 4843static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4844{
a2fbb9ea 4845 u32 val, i;
a2fbb9ea 4846
f2e0899f 4847 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4848
81f75bbf 4849 bnx2x_reset_common(bp);
34f80b04
EG
4850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4851 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4852
94a78b79 4853 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4854 if (!CHIP_IS_E1(bp))
fb3bff17 4855 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4856
f2e0899f
DK
4857 if (CHIP_IS_E2(bp)) {
4858 u8 fid;
4859
4860	/*
4861	 * In 4-port or 2-port mode we need to turn off master-enable
4862	 * for everyone; after that, turn it back on for self.
4863	 * So we disregard multi-function or not, and always disable
4864	 * for all functions on the given path, which means 0,2,4,6 for
4865	 * path 0 and 1,3,5,7 for path 1
4866	 */
4867 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4868 if (fid == BP_ABS_FUNC(bp)) {
4869 REG_WR(bp,
4870 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4871 1);
4872 continue;
4873 }
4874
4875 bnx2x_pretend_func(bp, fid);
4876 /* clear pf enable */
4877 bnx2x_pf_disable(bp);
4878 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4879 }
4880 }
a2fbb9ea 4881
94a78b79 4882 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
4883 if (CHIP_IS_E1(bp)) {
4884 /* enable HW interrupt from PXP on USDM overflow
4885 bit 16 on INT_MASK_0 */
4886 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4887 }
a2fbb9ea 4888
94a78b79 4889 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4890 bnx2x_init_pxp(bp);
a2fbb9ea
ET
4891
4892#ifdef __BIG_ENDIAN
34f80b04
EG
4893 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4894 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4895 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4896 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4897 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
4898 /* make sure this value is 0 */
4899 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
4900
4901/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4902 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4903 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4904 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4905 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
4906#endif
4907
523224a3
DK
4908 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4909
34f80b04
EG
4910 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4911 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 4912
34f80b04
EG
4913	/* let the HW do its magic ... */
4914 msleep(100);
4915 /* finish PXP init */
4916 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4917 if (val != 1) {
4918 BNX2X_ERR("PXP2 CFG failed\n");
4919 return -EBUSY;
4920 }
4921 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4922 if (val != 1) {
4923 BNX2X_ERR("PXP2 RD_INIT failed\n");
4924 return -EBUSY;
4925 }
a2fbb9ea 4926
f2e0899f
DK
4927 /* Timers bug workaround E2 only. We need to set the entire ILT to
4928 * have entries with value "0" and valid bit on.
4929 * This needs to be done by the first PF that is loaded in a path
4930 * (i.e. common phase)
4931 */
4932 if (CHIP_IS_E2(bp)) {
4933 struct ilt_client_info ilt_cli;
4934 struct bnx2x_ilt ilt;
4935 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4936 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4937
4938		/* initialize dummy TM client */
4939 ilt_cli.start = 0;
4940 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4941 ilt_cli.client_num = ILT_CLIENT_TM;
4942
4943 /* Step 1: set zeroes to all ilt page entries with valid bit on
4944 * Step 2: set the timers first/last ilt entry to point
4945 * to the entire range to prevent ILT range error for 3rd/4th
4946		 * vnic (this code assumes existence of the vnic)
4947 *
4948 * both steps performed by call to bnx2x_ilt_client_init_op()
4949 * with dummy TM client
4950 *
4951 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4952		 * and its brother are split registers
4953 */
4954 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4955 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4956 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4957
4958 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4959 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4960 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4961 }
4962
4963
34f80b04
EG
4964 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4965 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 4966
f2e0899f
DK
4967 if (CHIP_IS_E2(bp)) {
4968 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4969 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4970 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4971
4972 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4973
4974		/* let the HW do its magic ... */
4975 do {
4976 msleep(200);
4977 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4978 } while (factor-- && (val != 1));
4979
4980 if (val != 1) {
4981 BNX2X_ERR("ATC_INIT failed\n");
4982 return -EBUSY;
4983 }
4984 }
4985
94a78b79 4986 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 4987
34f80b04
EG
4988 /* clean the DMAE memory */
4989 bp->dmae_ready = 1;
4990 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 4991
94a78b79
VZ
4992 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4993 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4994 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4995 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 4996
34f80b04
EG
4997 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4998 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4999 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5000 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5001
94a78b79 5002 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 5003
f2e0899f
DK
5004 if (CHIP_MODE_IS_4_PORT(bp))
5005 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
f85582f8 5006
523224a3
DK
5007 /* QM queues pointers table */
5008 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5009
34f80b04
EG
5010 /* soft reset pulse */
5011 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5012 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5013
37b091ba 5014#ifdef BCM_CNIC
94a78b79 5015 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5016#endif
a2fbb9ea 5017
94a78b79 5018 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
523224a3
DK
5019 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5020
34f80b04
EG
5021 if (!CHIP_REV_IS_SLOW(bp)) {
5022 /* enable hw interrupt from doorbell Q */
5023 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5024 }
a2fbb9ea 5025
94a78b79 5026 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
f2e0899f
DK
5027 if (CHIP_MODE_IS_4_PORT(bp)) {
5028 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5029 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5030 }
5031
94a78b79 5032 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5033 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 5034#ifndef BCM_CNIC
3196a88a
EG
5035 /* set NIC mode */
5036 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 5037#endif
f2e0899f 5038 if (!CHIP_IS_E1(bp))
fb3bff17 5039 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
f85582f8 5040
f2e0899f
DK
5041 if (CHIP_IS_E2(bp)) {
5042 /* Bit-map indicating which L2 hdrs may appear after the
5043 basic Ethernet header */
5044 int has_ovlan = IS_MF(bp);
5045 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5046 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5047 }
a2fbb9ea 5048
94a78b79
VZ
5049 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5050 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5051 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5052 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5053
ca00392c
EG
5054 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5055 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5056 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5057 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5058
94a78b79
VZ
5059 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5060 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5061 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5062 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5063
f2e0899f
DK
5064 if (CHIP_MODE_IS_4_PORT(bp))
5065 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5066
34f80b04
EG
5067 /* sync semi rtc */
5068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5069 0x80000000);
5070 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5071 0x80000000);
a2fbb9ea 5072
94a78b79
VZ
5073 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5074 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5075 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5076
f2e0899f
DK
5077 if (CHIP_IS_E2(bp)) {
5078 int has_ovlan = IS_MF(bp);
5079 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5080 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5081 }
5082
34f80b04 5083 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
5084 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5085 REG_WR(bp, i, random32());
f85582f8 5086
94a78b79 5087 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
5088#ifdef BCM_CNIC
5089 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5090 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5091 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5092 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5093 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5094 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5095 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5096 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5097 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5098 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5099#endif
34f80b04 5100 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5101
34f80b04
EG
5102 if (sizeof(union cdu_context) != 1024)
5103 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5104 dev_alert(&bp->pdev->dev, "please adjust the size "
5105 "of cdu_context(%ld)\n",
7995c64e 5106 (long)sizeof(union cdu_context));
a2fbb9ea 5107
94a78b79 5108 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5109 val = (4 << 24) + (0 << 12) + 1024;
5110 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
		       (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
			PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
		       (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
			PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
			PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
		       (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
			PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
			PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-PORT mode, same ext phy is used for the two paths */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: the pf_master bit is disabled in pglue at
	 * the common phase, so we need to enable it here before any dmae
	 * access is attempted. Therefore we manually added the enable-master
	 * to the port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ?
				       160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ?
				       80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}
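
	/*
	 * Worked example for the thresholds above (illustrative values,
	 * not from the original source): with mtu = 9000 on a two-port
	 * device, low = 96 + 9000/64 + 1 = 237 BRB blocks, which equals
	 * ceil((24*1024 + 9000*4)/256); high = 237 + 56 = 293 adds 14KB
	 * (14*1024/256 blocks) of headroom on top of the low threshold.
	 */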

	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
					  BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));
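	/*
	 * Derived from the comment above: 0x7 is 00000111b (attention
	 * bits 0-2 enabled, 3-7 masked), while 0xF7 is 11110111b (bit 3
	 * masked, bits 4-7 enabled for the per-VN group attentions).
	 */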

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need
		   to set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers bug workaround: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - must be set again because of the WB DMAE
		 * writes performed before this register is re-initialized
		 * as part of the regular function init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line holds
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last
			 * 20 producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}
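
			/*
			 * Illustrative arithmetic (values assumed, not from
			 * the original source): with bp->igu_base_sb = 8 and
			 * num_segs = 2, the loop above zeroes producer lines
			 * 16 and 17 for this PF's first SB, i.e. the words
			 * at IGU_REG_PROD_CONS_MEMORY + 64 and + 68.
			 */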

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, \
					  (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)
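
	/*
	 * Both helpers NULL the pointer (and zero the DMA handle) after
	 * freeing, so bnx2x_free_mem() stays safe to call on a partially
	 * allocated bp - which is exactly what the alloc_mem_err unwind
	 * path in bnx2x_alloc_mem() relies on.
	 */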

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index,
							 status_blk);
	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
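
	/*
	 * Like the free helpers above, these jump to alloc_mem_err on
	 * failure, where bnx2x_free_mem() releases whatever has been
	 * allocated so far; the NULL checks in the free macros make that
	 * unwind safe at any point.
	 */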

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 * @param is_bcast	is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
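	/*
	 * For example (assuming a little-endian host): the MAC
	 * 00:11:22:33:44:55 is packed as msb 0x0011, middle 0x2233,
	 * lsb 0x4455 - the same %04x:%04x:%04x triplet that the DP()
	 * below prints.
	 */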
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running;
	   5000 iterations of msleep(1) bound the wait at roughly 5 sec */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	if (CHIP_IS_E1H(bp))
		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else if (CHIP_MODE_IS_4_PORT(bp))
		return BP_FUNC(bp) * 32 + rel_offset;
	else
		return BP_VN(bp) * 32 + rel_offset;
}
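
/*
 * Worked example (assuming E1H_FUNC_MAX is 8): on an E1H chip, CAM line
 * rel_offset 1 for PCI function 2 maps to entry 8*1 + 2 = 10, so each of
 * the eight functions gets its own slot within every relative line.
 */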

void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}

static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
			  ramrod_flags);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);
	return 0;
}
#endif

static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}

static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock, thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();

	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}

/**
 * Configure interrupt mode according to the current configuration.
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * @param bp
 *
 * @return int
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
				   "Multi requested but failed to "
				   "enable MSI-X (%d), "
				   "set number of queues to %d\n",
				   bp->num_queues,
				   1);
			bp->num_queues = 1;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}

/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}

void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
				 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
		     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
				   bnx2x_sp(bp, client_init_data),
				   bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}

int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
		      p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
		      p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}

static int bnx2x_stop_client(struct bnx2x *bp, int index)
{
	struct bnx2x_client_ramrod_params client_stop = {0};
	struct bnx2x_fastpath *fp = &bp->fp[index];

	client_stop.index = index;
	client_stop.cid = fp->cid;
	client_stop.cl_id = fp->cl_id;
	client_stop.pstate = &(fp->state);
	client_stop.poll = 0;

	return bnx2x_stop_fw_client(bp, &client_stop);
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);

	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers bug workaround for E2: if this is vnic-3,
	 * we need to set the entire ilt range for these timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() was called before reset_func() */
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	bp->dmae_ready = 0;
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_ABS_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* invalidate mc list,
		 * wait and poll (interrupts are off)
		 */
		bnx2x_invlidate_e1_mc_list(bp);
		bnx2x_set_eth_mac(bp, 0);

	} else {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

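		/* Pack the MAC into two MATCH registers: bytes 0-1 in the
		 * low 16 bits of the first, bytes 2-5 in the second.
		 */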
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections.
	   Completions for ramrods are collected in a synchronous way */
	for_each_queue(bp, i)

		if (bnx2x_stop_client(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
			return;
#else
			goto unload_error;
#endif

	rc = bnx2x_func_stop(bp);
	if (rc) {
		BNX2X_ERR("Function stop failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}
#ifndef BNX2X_STOP_ON_ERROR
unload_error:
#endif
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code, 0);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
				     "%d, %d, %d\n", BP_PATH(bp),
		   load_count[BP_PATH(bp)][0],
		   load_count[BP_PATH(bp)][1],
		   load_count[BP_PATH(bp)][2]);
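		/* load_count[path][0] counts every function on this path;
		 * [1] and [2] count functions per port (index 1 + port).
		 */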
		load_count[BP_PATH(bp)][0]--;
		load_count[BP_PATH(bp)][1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
				     "%d, %d, %d\n", BP_PATH(bp),
		   load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
		   load_count[BP_PATH(bp)][2]);
		if (load_count[BP_PATH(bp)][0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[BP_PATH(bp)][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 1);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

}

void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				  MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
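	/* Gates #2 (PXP internal writes) and #4 (doorbells) are "discard"
	 * bits in PSWHST; gate #3 is bit 0 of the per-port HC config
	 * register. Closing them isolates the host while "process kill"
	 * resets the chip underneath it.
	 */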
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp		Driver handle.
 * @param magic_val	Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/**
 * Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val	Old value of 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000	/* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100	/* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check the validity map of the last port;
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}

static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}

static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
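	/* The idle values checked below (sr_cnt 0x7e, blk_cnt 0xa0, both
	 * ports idle, PGL_EXP_ROM2 all-ones) are assumed to be the
	 * reset-time credit counts, i.e. no read requests outstanding.
	 */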
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}

static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all functions
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock,
					 * since the former leader may have
					 * been unloaded by the user or have
					 * released leadership for another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/*
 * Init service functions
 */

u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
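	/* The per-function "pretend" registers are laid out at a fixed
	 * stride; derive this function's register from the F0 base and
	 * the F1 - F0 distance.
	 */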
	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
	return base + (BP_ABS_FUNC(bp)) * stride;
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (CHIP_IS_E1(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
						 DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}

#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       bp->l2_cid_count);

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

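	/* Each valid CAM entry maps one IGU status block to a function
	 * (FID) and vector; vector 0 is that function's default SB.
	 */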
	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config. "
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			  SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			  SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    "  speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
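	/* Store the MAC in network (big-endian) order: two high bytes
	 * first, then the four low bytes.
	 */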
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  "
		       "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_ABS_FUNC(bp);
	int vn;
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
	} else {
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);

	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
			     bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);

		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->mf_mode = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		if (IS_MF(bp)) {
			val = (MF_CFG_RD(bp, func_mf_config[func].
					 e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERROR("No valid MF OV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_VN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more than 16 L2 clients
	 */
#define MAX_L2_CLIENTS				16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
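	/* Scan the read-only PCI VPD section; on boards whose manufacturer
	 * ID is Dell, the vendor-specific V0 keyword carries an extra
	 * firmware version string that is copied into bp->fw_ver.
	 */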
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}


/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is on-going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and 'reset done'
			 * is still not cleared, a recovery is pending. We
			 * don't check the attention state here because it may
			 * already have been cleared by a "common" reset, but
			 * we shall proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

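			/* 256-bit hash filter: the top byte of the CRC32c
			 * of the MAC selects one bit across eight 32-bit
			 * MC_HASH registers.
			 */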
22bedad3 8495 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 8496 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
523224a3 8497 bnx2x_mc_addr(ha));
34f80b04 8498
523224a3
DK
8499 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8500 ETH_ALEN);
34f80b04
EG
8501 bit = (crc >> 24) & 0xff;
8502 regidx = bit >> 5;
8503 bit &= 0x1f;
8504 mc_filter[regidx] |= (1 << bit);
8505 }
8506
8507 for (i = 0; i < MC_HASH_SIZE; i++)
8508 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8509 mc_filter[i]);
8510 }
8511 }
8512
8513 bp->rx_mode = rx_mode;
8514 bnx2x_set_storm_rx_mode(bp);
8515}
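
/*
 * The E1H multicast path above implements a 256-bin hash filter: the
 * top byte of the little-endian CRC32c of each multicast MAC selects
 * one bit spread across the MC_HASH_SIZE 32-bit MC_HASH registers.
 * Worked example for an arbitrary crc value of 0x4A000000:
 *
 *	bit    = (0x4A000000 >> 24) & 0xff  = 0x4A (bin 74)
 *	regidx = 0x4A >> 5                  = 2    (third register)
 *	bit   &= 0x1f                       = 0x0A (bit 10 of that reg)
 *
 * so mc_filter[2] gets bit 10 set for this address.
 */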

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
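
/*
 * Note on the MDIO path: bnx2x_ioctl() forwards the standard MII
 * ioctls (SIOCGMIIREG and friends) to the generic mdio_mii_ioctl()
 * helper, which decodes the mii_ioctl_data into (prtad, devad, addr)
 * and calls back into the bnx2x_mdio_read()/bnx2x_mdio_write() ops
 * registered in bnx2x_init_dev().  Since bp->mdio.mode_support is
 * MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22, clause-22 accesses are
 * emulated by substituting DEFAULT_PHY_DEV_ADDR for the missing devad,
 * as both callbacks do above.
 */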

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
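
/*
 * The err_out_* labels in bnx2x_init_dev() unwind in strict reverse
 * order of acquisition (unmap BARs, release regions, disable the
 * device), the usual kernel goto-based cleanup ladder.  As in the
 * setup path, pci_release_regions() is skipped unless this call was
 * the one that actually claimed the regions (pdev->enable_cnt == 1).
 */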

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
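
/*
 * bnx2x_check_firmware() thus validates three things before any part
 * of the blob is trusted: (1) every section's offset+len stays inside
 * firmware->size, (2) every init_ops offset indexes a real raw_op
 * entry, and (3) the version bytes embedded in the file match the
 * constants this driver was built against.  Only after all three pass
 * does bnx2x_init_firmware() start handing out pointers into the blob.
 */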

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
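
/*
 * Worked decode of one 8-byte ops record (illustrative bytes only).
 * For the big-endian words 0x01002a10 and 0x00000005:
 *
 *	tmp      = 0x01002a10
 *	op       = (tmp >> 24) & 0xff   = 0x01
 *	offset   = tmp & 0xffffff       = 0x002a10
 *	raw_data = 0x00000005
 */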

/*
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit)}
 * Each record occupies three 32-bit big-endian words; the base field
 * is carried in a full word of its own.
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
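
/*
 * BNX2X_ALLOC_AND_SET relies on "bp" and "fw_hdr" being in scope at
 * the call site: it allocates bp->arr sized after the matching
 * firmware section and converts the section into it with func,
 * jumping to lbl on allocation failure.  The do { } while (0) wrapper
 * makes the macro behave as a single statement, so it stays safe
 * inside unbraced if/else arms.
 */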

int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
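
/*
 * Example of the rounding above: roundup(x, QM_CID_ROUND) pads the CID
 * count up to the next QM_CID_ROUND boundary, so with an illustrative
 * QM_CID_ROUND of 16, an L2+CNIC total of 18 CIDs would yield 32.
 * The queue manager is thus always configured with a whole multiple
 * of QM_CID_ROUND CIDs.
 */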

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
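
/*
 * Note: after an EEH/slot reset the MCP is considered present only if
 * the shared memory base read back from MISC_REG_SHARED_MEM_ADDR falls
 * inside the [0xA0000, 0xC0000) window; anything else (including a
 * zero read) sets NO_MCP_FLAG so the driver continues in no-MCP mode
 * rather than trusting a bogus shmem pointer.
 */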

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be no more than 8 L2 and COMMON SPEs and no
		 * more than 8 L5 SPEs in the air.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
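
/*
 * Credit model for the CNIC slow path above: L2/COMMON SPEs draw from
 * the shared bp->spq_left budget, while iSCSI (L5) SPEs are bounded by
 * cnic_eth_dev.max_kwqe_pending via cnic_spq_pending.  Whichever limit
 * is hit first stops the drain loop; credits flow back through the
 * DRV_CTL_RET_L2_SPQ_CREDIT_CMD / DRV_CTL_RET_L5_SPQ_CREDIT_CMD cases
 * in bnx2x_drv_ctl() below.  cnic_kwq_cons wraps to the ring start
 * when it reaches cnic_kwq_last.
 */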

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring. Accept all
		 * multicasts because that is the only way for the UIO
		 * client to receive them (in non-promiscuous mode only
		 * one client per function, the leading one in our case,
		 * receives multicast packets).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on the iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
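
/*
 * IRQ layout handed to CNIC above: in MSI-X mode, entry 0 carries
 * bp->msix_table[1].vector (the vector right after the driver's own
 * slow-path vector) together with the CNIC status block, while entry 1
 * always describes the default status block; cp->num_irq = 2 covers
 * both.
 */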

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */