/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

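/*
 * Illustrative usage only, not part of the driver: these are plain module
 * parameters, so forcing MSI mode with four RSS queues could look like
 *
 *	modprobe bnx2x multi_mode=1 num_queues=4 int_mode=2
 *
 * The accepted values are exactly those documented in the
 * MODULE_PARM_DESC() strings above.
 */
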
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
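
/*
 * Illustrative note, not part of the driver: the two helpers above
 * implement indirect GRC access through PCI config space - write the
 * target offset to PCICFG_GRC_ADDRESS, move the data through
 * PCICFG_GRC_DATA, then park the address register on the harmless
 * PCICFG_VENDOR_ID_OFFSET. A read-modify-write sketch:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, addr);
 *	bnx2x_reg_wr_ind(bp, addr, val | some_bit);
 *
 * where "some_bit" is a placeholder, not a real register flag.
 */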

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
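
/*
 * Illustrative sketch, not part of the driver: bnx2x_dmae_opcode() just
 * ORs the source/destination types, port/VN routing, endianness and the
 * optional completion bits into one 32-bit opcode. A PCI-to-GRC copy
 * that reports completion to PCI memory would be built as
 *
 *	u32 op = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 *
 * which is exactly what bnx2x_prep_dmae_with_comp() below does.
 */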

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
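
/*
 * Illustrative note, not part of the driver: DMAE_LEN32_WR_MAX() is the
 * per-command limit in 32-bit words, so the loop above splits a long
 * write into maximal chunks. With a limit of 0x400 words, for example,
 * len = 0x500 issues one 0x400-word command and one 0x100-word command,
 * advancing both phys_addr and addr by 0x400 * 4 bytes in between.
 */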

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
		       bp->def_status_blk->sp_sb.index_values[i],
		       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
	       sp_sb_data.igu_sb_id,
	       sp_sb_data.igu_seg_id,
	       sp_sb_data.p_func.pf_id,
	       sp_sb_data.p_func.vnic_id,
	       sp_sb_data.p_func.vf_id,
	       sp_sb_data.p_func.vf_valid);

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
			       "time_to_expire (0x%x) "
			       "timer_value(0x%x)\n", j,
			       hc_sm_p[j].__flags,
			       hc_sm_p[j].igu_sb_id,
			       hc_sm_p[j].igu_seg_id,
			       hc_sm_p[j].time_to_expire,
			       hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
					 "timeout (0x%x)\n", j,
					 hc_index_p[j].flags,
					 hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}
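
/*
 * Illustrative note, not part of the driver: older E1/E1H (E1x) chips
 * steer interrupts through the HC block while E2 uses the IGU, so
 * bnx2x_int_enable() is the single entry point that dispatches on
 * bp->common.int_block; callers never pick the HC/IGU variant directly.
 */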

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable
	 * MSI/MSI-X capability.
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
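
/*
 * Illustrative note, not part of the driver: the quiesce order above is
 * deliberate - first raise intr_sem so ISRs bail out early, optionally
 * mask the hardware, then synchronize_irq() every vector so in-flight
 * handlers drain, and only then flush the slowpath work. Reordering
 * these steps could let a late interrupt requeue work after the flush.
 */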

/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
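
/*
 * Illustrative sketch, not part of the driver: the HW lock register pair
 * works as "write the resource bit to reg + 4 to request, read reg back
 * to see whether we own it", so a non-blocking caller would do
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * HW_LOCK_RESOURCE_GPIO is just an example resource id here.
 */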

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->spq_left);
	/* push the change in fp->state towards memory */
	smp_wmb();

	return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
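
/*
 * Illustrative note, not part of the driver: the ack'ed status word is a
 * bitmask - bit 0 belongs to the slowpath/default status block, and each
 * fastpath queue i owns bit (1 + i + CNIC_CONTEXT_USE) via the
 * "0x2 << ..." test above, so one shared interrupt can schedule NAPI on
 * several queues in a single pass.
 */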

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
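
/*
 * Illustrative sketch, not part of the driver: a caller drives a pin by
 * picking one of the MISC_REGISTERS_GPIO_* modes, e.g. pulsing GPIO 1
 * low and back high on port 0:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, 0);
 *
 * The helper handles port swapping and the GPIO HW lock internally.
 */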

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1729
a22f0788
YR
1730int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1731{
1732 u32 sel_phy_idx = 0;
1733 if (bp->link_vars.link_up) {
1734 sel_phy_idx = EXT_PHY1;
1735 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1736 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1737 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1738 sel_phy_idx = EXT_PHY2;
1739 } else {
1740
1741 switch (bnx2x_phy_selection(&bp->link_params)) {
1742 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1743 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1744 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1745 sel_phy_idx = EXT_PHY1;
1746 break;
1747 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1748 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1749 sel_phy_idx = EXT_PHY2;
1750 break;
1751 }
1752 }
1753 /*
 1754 * The selected active PHY is always after swapping (in case PHY
1755 * swapping is enabled). So when swapping is enabled, we need to reverse
1756 * the configuration
1757 */
1758
1759 if (bp->link_params.multi_phy_config &
1760 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1761 if (sel_phy_idx == EXT_PHY1)
1762 sel_phy_idx = EXT_PHY2;
1763 else if (sel_phy_idx == EXT_PHY2)
1764 sel_phy_idx = EXT_PHY1;
1765 }
1766 return LINK_CONFIG_IDX(sel_phy_idx);
1767}
1768
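/*
 * Illustrative sketch (not part of the driver): the swap reversal performed
 * at the end of bnx2x_get_link_cfg_idx().  The enum values below are demo
 * stand-ins, not the driver's EXT_PHY1/EXT_PHY2 constants.
 */
#if 0	/* standalone demo */
#include <stdio.h>

enum { DEMO_EXT_PHY1, DEMO_EXT_PHY2 };

int main(void)
{
	int sel_phy_idx = DEMO_EXT_PHY1;
	int swapped = 1;	/* PORT_HW_CFG_PHY_SWAPPED_ENABLED analogue */

	/* configuration indices follow the pre-swap ordering */
	if (swapped)
		sel_phy_idx = (sel_phy_idx == DEMO_EXT_PHY1) ?
			      DEMO_EXT_PHY2 : DEMO_EXT_PHY1;
	printf("link config index taken from PHY %d\n", sel_phy_idx);
	return 0;
}
#endif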
9f6c9258 1769void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1770{
a22f0788 1771 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1772 switch (bp->link_vars.ieee_fc &
1773 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1774 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1775 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1776 ADVERTISED_Pause);
c18487ee 1777 break;
356e2385 1778
c18487ee 1779 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1780 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1781 ADVERTISED_Pause);
c18487ee 1782 break;
356e2385 1783
c18487ee 1784 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1785 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1786 break;
356e2385 1787
c18487ee 1788 default:
a22f0788 1789 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1790 ADVERTISED_Pause);
c18487ee
YR
1791 break;
1792 }
1793}
f1410647 1794
9f6c9258 1795u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1796{
19680c48
EG
1797 if (!BP_NOMCP(bp)) {
1798 u8 rc;
a22f0788
YR
1799 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1800 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1801 /* Initialize link parameters structure variables */
8c99e7b0
YR
1802 /* It is recommended to turn off RX FC for jumbo frames
1803 for better performance */
f2e0899f 1804 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1805 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1806 else
c0700f90 1807 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1808
4a37fb66 1809 bnx2x_acquire_phy_lock(bp);
b5bf9068 1810
a22f0788 1811 if (load_mode == LOAD_DIAG) {
de6eae1f 1812 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1813 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1814 }
b5bf9068 1815
19680c48 1816 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1817
4a37fb66 1818 bnx2x_release_phy_lock(bp);
a2fbb9ea 1819
3c96c68b
EG
1820 bnx2x_calc_fc_adv(bp);
1821
b5bf9068
EG
1822 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1823 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1824 bnx2x_link_report(bp);
b5bf9068 1825 }
a22f0788 1826 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1827 return rc;
1828 }
f5372251 1829 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1830 return -EINVAL;
a2fbb9ea
ET
1831}
1832
9f6c9258 1833void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1834{
19680c48 1835 if (!BP_NOMCP(bp)) {
4a37fb66 1836 bnx2x_acquire_phy_lock(bp);
54c2fb78 1837 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1838 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1839 bnx2x_release_phy_lock(bp);
a2fbb9ea 1840
19680c48
EG
1841 bnx2x_calc_fc_adv(bp);
1842 } else
f5372251 1843 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1844}
a2fbb9ea 1845
c18487ee
YR
1846static void bnx2x__link_reset(struct bnx2x *bp)
1847{
19680c48 1848 if (!BP_NOMCP(bp)) {
4a37fb66 1849 bnx2x_acquire_phy_lock(bp);
589abe3a 1850 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1851 bnx2x_release_phy_lock(bp);
19680c48 1852 } else
f5372251 1853 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1854}
a2fbb9ea 1855
a22f0788 1856u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1857{
2145a920 1858 u8 rc = 0;
a2fbb9ea 1859
2145a920
VZ
1860 if (!BP_NOMCP(bp)) {
1861 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1862 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1863 is_serdes);
2145a920
VZ
1864 bnx2x_release_phy_lock(bp);
1865 } else
1866 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1867
c18487ee
YR
1868 return rc;
1869}
a2fbb9ea 1870
8a1c38d1 1871static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1872{
8a1c38d1
EG
1873 u32 r_param = bp->link_vars.line_speed / 8;
1874 u32 fair_periodic_timeout_usec;
1875 u32 t_fair;
34f80b04 1876
8a1c38d1
EG
1877 memset(&(bp->cmng.rs_vars), 0,
1878 sizeof(struct rate_shaping_vars_per_port));
1879 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1880
8a1c38d1
EG
1881 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1882 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1883
8a1c38d1
EG
 1884 /* this is the threshold below which no timer arming will occur;
 1885 the 1.25 coefficient makes the threshold a little bigger
 1886 than the real time, to compensate for timer inaccuracy */
1887 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1888 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1889
8a1c38d1
EG
1890 /* resolution of fairness timer */
1891 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1892 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1893 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1894
8a1c38d1
EG
1895 /* this is the threshold below which we won't arm the timer anymore */
1896 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1897
8a1c38d1
EG
 1898 /* we multiply by 1e3/8 to get bytes/msec.
 1899 The credits must not exceed
 1900 t_fair*FAIR_MEM (the algorithm resolution) */
1901 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1902 /* since each tick is 4 usec */
1903 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1904}
1905
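/*
 * Illustrative sketch (not part of the driver): the timing math in
 * bnx2x_init_port_minmax() worked through for a 10 Gbps link.  The constant
 * values below are assumptions chosen so that t_fair comes out at 1000 usec
 * for 10G, matching the comment in the function above.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

#define DEMO_T_FAIR_COEF		10000000u	/* assumed */
#define DEMO_QM_ARB_BYTES		40000u		/* assumed */
#define DEMO_RS_PERIODIC_TIMEOUT_USEC	100u		/* assumed */

int main(void)
{
	uint32_t line_speed = 10000;			/* Mbps */
	uint32_t r_param = line_speed / 8;
	uint32_t t_fair = DEMO_T_FAIR_COEF / line_speed;
	uint32_t fair_timeout = DEMO_QM_ARB_BYTES / r_param;
	uint32_t rs_threshold = (DEMO_RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	printf("t_fair=%u usec fairness_timeout=%u usec rs_threshold=%u\n",
	       t_fair, fair_timeout, rs_threshold);
	return 0;
}
#endif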
2691d51d
EG
1906/* Calculates the sum of vn_min_rates.
1907 It's needed for further normalizing of the min_rates.
1908 Returns:
1909 sum of vn_min_rates.
1910 or
1911 0 - if all the min_rates are 0.
 1912 In the latter case the fairness algorithm should be deactivated.
 1913 If not all min_rates are zero then those that are zero will be set to 1.
1914 */
1915static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1916{
1917 int all_zero = 1;
2691d51d
EG
1918 int vn;
1919
1920 bp->vn_weight_sum = 0;
1921 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1922 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1923 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1924 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1925
1926 /* Skip hidden vns */
1927 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1928 continue;
1929
1930 /* If min rate is zero - set it to 1 */
1931 if (!vn_min_rate)
1932 vn_min_rate = DEF_MIN_RATE;
1933 else
1934 all_zero = 0;
1935
1936 bp->vn_weight_sum += vn_min_rate;
1937 }
1938
 1939 /* disable fairness only if all the min rates are zero */
b015e3d1
EG
1940 if (all_zero) {
1941 bp->cmng.flags.cmng_enables &=
1942 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1943 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1944 " fairness will be disabled\n");
1945 } else
1946 bp->cmng.flags.cmng_enables |=
1947 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1948}
1949
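/*
 * Illustrative sketch (not part of the driver): how bnx2x_calc_vn_weight_sum()
 * promotes zero min rates to a token share before summing.  The field layout
 * and DEF_MIN_RATE value below are assumptions for this standalone demo.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

#define DEMO_MIN_BW_MASK	0x00ff0000u	/* assumed layout */
#define DEMO_MIN_BW_SHIFT	16
#define DEMO_DEF_MIN_RATE	100		/* assumed */

int main(void)
{
	uint32_t cfgs[4] = { 0x00200000, 0, 0x00100000, 0 };
	uint32_t sum = 0;
	int vn;

	for (vn = 0; vn < 4; vn++) {
		uint32_t min_rate = ((cfgs[vn] & DEMO_MIN_BW_MASK) >>
				     DEMO_MIN_BW_SHIFT) * 100;
		if (!min_rate)		/* zero weights still take part */
			min_rate = DEMO_DEF_MIN_RATE;
		sum += min_rate;
	}
	printf("vn_weight_sum = %u\n", sum);	/* 3200 + 100 + 1600 + 100 */
	return 0;
}
#endif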
f2e0899f 1950static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1951{
1952 struct rate_shaping_vars_per_vn m_rs_vn;
1953 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1954 u32 vn_cfg = bp->mf_config[vn];
1955 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1956 u16 vn_min_rate, vn_max_rate;
1957 int i;
1958
1959 /* If function is hidden - set min and max to zeroes */
1960 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1961 vn_min_rate = 0;
1962 vn_max_rate = 0;
1963
1964 } else {
1965 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1966 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1 1967 /* If min rate is zero - set it to 1 */
f2e0899f 1968 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
1969 vn_min_rate = DEF_MIN_RATE;
1970 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1971 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1972 }
f85582f8 1973
8a1c38d1 1974 DP(NETIF_MSG_IFUP,
b015e3d1 1975 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1976 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1977
1978 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1979 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1980
1981 /* global vn counter - maximal Mbps for this vn */
1982 m_rs_vn.vn_counter.rate = vn_max_rate;
1983
1984 /* quota - number of bytes transmitted in this period */
1985 m_rs_vn.vn_counter.quota =
1986 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1987
8a1c38d1 1988 if (bp->vn_weight_sum) {
34f80b04
EG
1989 /* credit for each period of the fairness algorithm:
 1990 number of bytes in T_FAIR (the VNs share the port rate).
8a1c38d1
EG
1991 vn_weight_sum should not be larger than 10000, thus
1992 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1993 than zero */
34f80b04 1994 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1995 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1996 (8 * bp->vn_weight_sum))),
1997 (bp->cmng.fair_vars.fair_threshold * 2));
1998 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
1999 m_fair_vn.vn_credit_delta);
2000 }
2001
34f80b04
EG
2002 /* Store it to internal memory */
2003 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2004 REG_WR(bp, BAR_XSTRORM_INTMEM +
2005 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2006 ((u32 *)(&m_rs_vn))[i]);
2007
2008 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2009 REG_WR(bp, BAR_XSTRORM_INTMEM +
2010 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2011 ((u32 *)(&m_fair_vn))[i]);
2012}
f85582f8 2013
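/*
 * Illustrative sketch (not part of the driver): the per-VN credit delta from
 * bnx2x_init_vn_minmax() - the VN's share of T_FAIR, floored at twice the
 * fairness threshold.  All the numbers below are assumptions for the demo.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t t_fair_coef = 10000000, vn_weight_sum = 5000;	/* assumed */
	uint32_t vn_min_rate = 3200, fair_threshold = 40000;	/* assumed */
	uint32_t credit = vn_min_rate * (t_fair_coef / (8 * vn_weight_sum));

	if (credit < fair_threshold * 2)	/* the max_t() in the driver */
		credit = fair_threshold * 2;
	printf("vn_credit_delta = %u\n", credit);
	return 0;
}
#endif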
523224a3
DK
2014static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2015{
2016 if (CHIP_REV_IS_SLOW(bp))
2017 return CMNG_FNS_NONE;
fb3bff17 2018 if (IS_MF(bp))
523224a3
DK
2019 return CMNG_FNS_MINMAX;
2020
2021 return CMNG_FNS_NONE;
2022}
2023
2024static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2025{
2026 int vn;
2027
2028 if (BP_NOMCP(bp))
 2029 return; /* what should be the default value in this case? */
2030
2031 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2032 int /*abs*/func = 2*vn + BP_PORT(bp);
f2e0899f 2033 bp->mf_config[vn] =
523224a3
DK
2034 MF_CFG_RD(bp, func_mf_config[func].config);
2035 }
2036}
2037
2038static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2039{
2040
2041 if (cmng_type == CMNG_FNS_MINMAX) {
2042 int vn;
2043
2044 /* clear cmng_enables */
2045 bp->cmng.flags.cmng_enables = 0;
2046
2047 /* read mf conf from shmem */
2048 if (read_cfg)
2049 bnx2x_read_mf_cfg(bp);
2050
2051 /* Init rate shaping and fairness contexts */
2052 bnx2x_init_port_minmax(bp);
2053
2054 /* vn_weight_sum and enable fairness if not 0 */
2055 bnx2x_calc_vn_weight_sum(bp);
2056
2057 /* calculate and set min-max rate for each vn */
2058 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2059 bnx2x_init_vn_minmax(bp, vn);
2060
2061 /* always enable rate shaping and fairness */
2062 bp->cmng.flags.cmng_enables |=
2063 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2064 if (!bp->vn_weight_sum)
2065 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2066 " fairness will be disabled\n");
2067 return;
2068 }
2069
2070 /* rate shaping and fairness are disabled */
2071 DP(NETIF_MSG_IFUP,
2072 "rate shaping and fairness are disabled\n");
2073}
34f80b04 2074
523224a3
DK
2075static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2076{
2077 int port = BP_PORT(bp);
2078 int func;
2079 int vn;
2080
2081 /* Set the attention towards other drivers on the same port */
2082 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2083 if (vn == BP_E1HVN(bp))
2084 continue;
2085
2086 func = ((vn << 1) | port);
2087 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2088 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2089 }
2090}
8a1c38d1 2091
c18487ee
YR
2092/* This function is called upon link interrupt */
2093static void bnx2x_link_attn(struct bnx2x *bp)
2094{
d9e8b185 2095 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2096 /* Make sure that we are synced with the current statistics */
2097 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2098
c18487ee 2099 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2100
bb2a0f7a
YG
2101 if (bp->link_vars.link_up) {
2102
1c06328c 2103 /* dropless flow control */
f2e0899f 2104 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2105 int port = BP_PORT(bp);
2106 u32 pause_enabled = 0;
2107
2108 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2109 pause_enabled = 1;
2110
2111 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2112 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2113 pause_enabled);
2114 }
2115
bb2a0f7a
YG
2116 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2117 struct host_port_stats *pstats;
2118
2119 pstats = bnx2x_sp(bp, port_stats);
2120 /* reset old bmac stats */
2121 memset(&(pstats->mac_stx[0]), 0,
2122 sizeof(struct mac_stx));
2123 }
f34d28ea 2124 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2125 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2126 }
2127
d9e8b185
VZ
2128 /* indicate link status only if link status actually changed */
2129 if (prev_link_status != bp->link_vars.link_status)
2130 bnx2x_link_report(bp);
34f80b04 2131
f2e0899f
DK
2132 if (IS_MF(bp))
2133 bnx2x_link_sync_notify(bp);
34f80b04 2134
f2e0899f
DK
2135 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2136 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2137
f2e0899f
DK
2138 if (cmng_fns != CMNG_FNS_NONE) {
2139 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2140 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2141 } else
2142 /* rate shaping and fairness are disabled */
2143 DP(NETIF_MSG_IFUP,
2144 "single function mode without fairness\n");
34f80b04 2145 }
c18487ee 2146}
a2fbb9ea 2147
9f6c9258 2148void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2149{
f34d28ea 2150 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2151 return;
a2fbb9ea 2152
c18487ee 2153 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2154
bb2a0f7a
YG
2155 if (bp->link_vars.link_up)
2156 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2157 else
2158 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2159
f2e0899f
DK
2160 /* the link status update could be the result of a DCC event
2161 hence re-read the shmem mf configuration */
2162 bnx2x_read_mf_cfg(bp);
2691d51d 2163
c18487ee
YR
2164 /* indicate link status */
2165 bnx2x_link_report(bp);
a2fbb9ea 2166}
a2fbb9ea 2167
34f80b04
EG
2168static void bnx2x_pmf_update(struct bnx2x *bp)
2169{
2170 int port = BP_PORT(bp);
2171 u32 val;
2172
2173 bp->port.pmf = 1;
2174 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2175
2176 /* enable nig attention */
2177 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2178 if (bp->common.int_block == INT_BLOCK_HC) {
2179 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2180 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2181 } else if (CHIP_IS_E2(bp)) {
2182 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2183 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2184 }
bb2a0f7a
YG
2185
2186 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2187}
2188
c18487ee 2189/* end of Link */
a2fbb9ea
ET
2190
2191/* slow path */
2192
2193/*
2194 * General service functions
2195 */
2196
2691d51d 2197/* send the MCP a request, block until there is a reply */
a22f0788 2198u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2199{
f2e0899f 2200 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2201 u32 seq = ++bp->fw_seq;
2202 u32 rc = 0;
2203 u32 cnt = 1;
2204 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2205
c4ff7cbf 2206 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2207 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2208 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2209
2691d51d
EG
2210 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2211
2212 do {
 2213 /* let the FW do its magic ... */
2214 msleep(delay);
2215
f2e0899f 2216 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2217
c4ff7cbf
EG
 2218 /* Give the FW up to 5 seconds (500*10ms) */
2219 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2220
2221 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2222 cnt*delay, rc, seq);
2223
2224 /* is this a reply to our command? */
2225 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2226 rc &= FW_MSG_CODE_MASK;
2227 else {
2228 /* FW BUG! */
2229 BNX2X_ERR("FW failed to respond!\n");
2230 bnx2x_fw_dump(bp);
2231 rc = 0;
2232 }
c4ff7cbf 2233 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2234
2235 return rc;
2236}
2237
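/*
 * Illustrative sketch (not part of the driver): the sequence-number handshake
 * that bnx2x_fw_command() relies on.  The driver writes (command | seq) and
 * polls the FW mailbox until the reply carries the same sequence number.
 * The mask below is an assumption standing in for FW_MSG_SEQ_NUMBER_MASK.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SEQ_MASK 0x0000ffffu	/* assumed split of the mailbox word */

int main(void)
{
	uint32_t seq = 0x0012;
	uint32_t fw_mb = 0x46000012;	/* pretend FW reply: code | our seq */

	if ((fw_mb & DEMO_SEQ_MASK) == seq)
		printf("got reply, code 0x%x\n", fw_mb & ~DEMO_SEQ_MASK);
	else
		printf("stale reply, keep polling\n");
	return 0;
}
#endif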
523224a3
DK
2238/* must be called under rtnl_lock */
2239void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2240{
523224a3 2241 u32 mask = (1 << cl_id);
2691d51d 2242
523224a3
DK
 2243 /* initial setting is BNX2X_ACCEPT_NONE */
2244 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2245 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2246 u8 unmatched_unicast = 0;
2691d51d 2247
523224a3
DK
2248 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2249 /* promiscuous - accept all, drop none */
2250 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2251 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2252 }
2253 if (filters & BNX2X_ACCEPT_UNICAST) {
2254 /* accept matched ucast */
2255 drop_all_ucast = 0;
2256 }
2257 if (filters & BNX2X_ACCEPT_MULTICAST) {
2258 /* accept matched mcast */
2259 drop_all_mcast = 0;
2260 }
2261 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 2262 /* accept all ucast */
2263 drop_all_ucast = 0;
2264 accp_all_ucast = 1;
2265 }
2266 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2267 /* accept all mcast */
2268 drop_all_mcast = 0;
2269 accp_all_mcast = 1;
2270 }
2271 if (filters & BNX2X_ACCEPT_BROADCAST) {
2272 /* accept (all) bcast */
2273 drop_all_bcast = 0;
2274 accp_all_bcast = 1;
2275 }
2691d51d 2276
523224a3
DK
2277 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2278 bp->mac_filters.ucast_drop_all | mask :
2279 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2280
523224a3
DK
2281 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2282 bp->mac_filters.mcast_drop_all | mask :
2283 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2284
523224a3
DK
2285 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2286 bp->mac_filters.bcast_drop_all | mask :
2287 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2288
523224a3
DK
2289 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2290 bp->mac_filters.ucast_accept_all | mask :
2291 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2292
523224a3
DK
2293 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2294 bp->mac_filters.mcast_accept_all | mask :
2295 bp->mac_filters.mcast_accept_all & ~mask;
2296
2297 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2298 bp->mac_filters.bcast_accept_all | mask :
2299 bp->mac_filters.bcast_accept_all & ~mask;
2300
2301 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2302 bp->mac_filters.unmatched_unicast | mask :
2303 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2304}
2305
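/*
 * Illustrative sketch (not part of the driver): the per-client mask update
 * pattern repeated throughout bnx2x_rxq_set_mac_filters() - each filter word
 * holds one bit per client id, set or cleared without touching the others.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

static uint32_t set_client_bit(uint32_t field, unsigned int cl_id, int on)
{
	uint32_t mask = 1u << cl_id;

	return on ? (field | mask) : (field & ~mask);
}

int main(void)
{
	uint32_t ucast_drop_all = 0;

	ucast_drop_all = set_client_bit(ucast_drop_all, 3, 1);
	printf("after set:   0x%x\n", ucast_drop_all);
	ucast_drop_all = set_client_bit(ucast_drop_all, 3, 0);
	printf("after clear: 0x%x\n", ucast_drop_all);
	return 0;
}
#endif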
523224a3 2306void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2307{
030f3356
DK
2308 struct tstorm_eth_function_common_config tcfg = {0};
2309 u16 rss_flgs;
2691d51d 2310
030f3356
DK
2311 /* tpa */
2312 if (p->func_flgs & FUNC_FLG_TPA)
2313 tcfg.config_flags |=
2314 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2315
030f3356
DK
2316 /* set rss flags */
2317 rss_flgs = (p->rss->mode <<
2318 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2319
2320 if (p->rss->cap & RSS_IPV4_CAP)
2321 rss_flgs |= RSS_IPV4_CAP_MASK;
2322 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2323 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2324 if (p->rss->cap & RSS_IPV6_CAP)
2325 rss_flgs |= RSS_IPV6_CAP_MASK;
2326 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2327 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2328
2329 tcfg.config_flags |= rss_flgs;
2330 tcfg.rss_result_mask = p->rss->result_mask;
2331
2332 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2333
523224a3
DK
2334 /* Enable the function in the FW */
2335 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2336 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2337
523224a3
DK
2338 /* statistics */
2339 if (p->func_flgs & FUNC_FLG_STATS) {
2340 struct stats_indication_flags stats_flags = {0};
2341 stats_flags.collect_eth = 1;
2691d51d 2342
523224a3
DK
2343 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2344 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2345
523224a3
DK
2346 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2347 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2348
523224a3
DK
2349 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2350 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2351
523224a3
DK
2352 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2353 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2354 }
2355
523224a3
DK
2356 /* spq */
2357 if (p->func_flgs & FUNC_FLG_SPQ) {
2358 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2359 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2360 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2361 }
2691d51d
EG
2362}
2363
523224a3
DK
2364static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365 struct bnx2x_fastpath *fp)
28912902 2366{
523224a3 2367 u16 flags = 0;
28912902 2368
523224a3
DK
2369 /* calculate queue flags */
2370 flags |= QUEUE_FLG_CACHE_ALIGN;
2371 flags |= QUEUE_FLG_HC;
fb3bff17 2372 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
28912902 2373
523224a3
DK
2374#ifdef BCM_VLAN
2375 flags |= QUEUE_FLG_VLAN;
2376 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2377#endif
2378
2379 if (!fp->disable_tpa)
2380 flags |= QUEUE_FLG_TPA;
2381
2382 flags |= QUEUE_FLG_STATS;
2383
2384 return flags;
2385}
2386
2387static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2388 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2389 struct bnx2x_rxq_init_params *rxq_init)
2390{
2391 u16 max_sge = 0;
2392 u16 sge_sz = 0;
2393 u16 tpa_agg_size = 0;
2394
2395 /* calculate queue flags */
2396 u16 flags = bnx2x_get_cl_flags(bp, fp);
2397
2398 if (!fp->disable_tpa) {
2399 pause->sge_th_hi = 250;
2400 pause->sge_th_lo = 150;
2401 tpa_agg_size = min_t(u32,
2402 (min_t(u32, 8, MAX_SKB_FRAGS) *
2403 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2404 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2405 SGE_PAGE_SHIFT;
2406 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2407 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2408 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2409 0xffff);
2410 }
2411
2412 /* pause - not for e1 */
2413 if (!CHIP_IS_E1(bp)) {
2414 pause->bd_th_hi = 350;
2415 pause->bd_th_lo = 250;
2416 pause->rcq_th_hi = 350;
2417 pause->rcq_th_lo = 250;
2418 pause->sge_th_hi = 0;
2419 pause->sge_th_lo = 0;
2420 pause->pri_map = 1;
2421 }
2422
2423 /* rxq setup */
2424 rxq_init->flags = flags;
2425 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2426 rxq_init->dscr_map = fp->rx_desc_mapping;
2427 rxq_init->sge_map = fp->rx_sge_mapping;
2428 rxq_init->rcq_map = fp->rx_comp_mapping;
2429 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2430 rxq_init->mtu = bp->dev->mtu;
2431 rxq_init->buf_sz = bp->rx_buf_size;
2432 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2433 rxq_init->cl_id = fp->cl_id;
2434 rxq_init->spcl_id = fp->cl_id;
2435 rxq_init->stat_id = fp->cl_id;
2436 rxq_init->tpa_agg_sz = tpa_agg_size;
2437 rxq_init->sge_buf_sz = sge_sz;
2438 rxq_init->max_sges_pkt = max_sge;
2439 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2440 rxq_init->fw_sb_id = fp->fw_sb_id;
2441
2442 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2443
2444 rxq_init->cid = HW_CID(bp, fp->cid);
2445
2446 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2447}
2448
2449static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2450 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2451{
2452 u16 flags = bnx2x_get_cl_flags(bp, fp);
2453
2454 txq_init->flags = flags;
2455 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2456 txq_init->dscr_map = fp->tx_desc_mapping;
2457 txq_init->stat_id = fp->cl_id;
2458 txq_init->cid = HW_CID(bp, fp->cid);
2459 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2460 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2461 txq_init->fw_sb_id = fp->fw_sb_id;
2462 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2463}
2464
2465void bnx2x_pf_init(struct bnx2x *bp)
2466{
2467 struct bnx2x_func_init_params func_init = {0};
2468 struct bnx2x_rss_params rss = {0};
2469 struct event_ring_data eq_data = { {0} };
2470 u16 flags;
2471
2472 /* pf specific setups */
2473 if (!CHIP_IS_E1(bp))
fb3bff17 2474 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2475
f2e0899f
DK
2476 if (CHIP_IS_E2(bp)) {
2477 /* reset IGU PF statistics: MSIX + ATTN */
2478 /* PF */
2479 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2480 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2481 (CHIP_MODE_IS_4_PORT(bp) ?
2482 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2483 /* ATTN */
2484 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2485 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2486 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2487 (CHIP_MODE_IS_4_PORT(bp) ?
2488 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2489 }
2490
523224a3
DK
2491 /* function setup flags */
2492 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2493
f2e0899f
DK
2494 if (CHIP_IS_E1x(bp))
2495 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2496 else
2497 flags |= FUNC_FLG_TPA;
523224a3 2498
030f3356
DK
2499 /* function setup */
2500
523224a3
DK
2501 /**
2502 * Although RSS is meaningless when there is a single HW queue we
2503 * still need it enabled in order to have HW Rx hash generated.
523224a3 2504 */
030f3356
DK
2505 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2506 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2507 rss.mode = bp->multi_mode;
2508 rss.result_mask = MULTI_MASK;
2509 func_init.rss = &rss;
523224a3
DK
2510
2511 func_init.func_flgs = flags;
2512 func_init.pf_id = BP_FUNC(bp);
2513 func_init.func_id = BP_FUNC(bp);
2514 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2515 func_init.spq_map = bp->spq_mapping;
2516 func_init.spq_prod = bp->spq_prod_idx;
2517
2518 bnx2x_func_init(bp, &func_init);
2519
2520 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2521
2522 /*
 2523 Congestion management values depend on the link rate.
 2524 There is no active link, so the initial link rate is set to 10 Gbps.
 2525 When the link comes up, the congestion management values are
 2526 re-calculated according to the actual link rate.
2527 */
2528 bp->link_vars.line_speed = SPEED_10000;
2529 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2530
2531 /* Only the PMF sets the HW */
2532 if (bp->port.pmf)
2533 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2534
2535 /* no rx until link is up */
2536 bp->rx_mode = BNX2X_RX_MODE_NONE;
2537 bnx2x_set_storm_rx_mode(bp);
2538
2539 /* init Event Queue */
2540 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2541 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2542 eq_data.producer = bp->eq_prod;
2543 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2544 eq_data.sb_id = DEF_SB_ID;
2545 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2546}
2547
2548
2549static void bnx2x_e1h_disable(struct bnx2x *bp)
2550{
2551 int port = BP_PORT(bp);
2552
2553 netif_tx_disable(bp->dev);
2554
2555 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2556
2557 netif_carrier_off(bp->dev);
2558}
2559
2560static void bnx2x_e1h_enable(struct bnx2x *bp)
2561{
2562 int port = BP_PORT(bp);
2563
2564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2565
 2566 /* Only the Tx queues need to be re-enabled */
2567 netif_tx_wake_all_queues(bp->dev);
2568
2569 /*
 2570 * Do not call netif_carrier_on here; it will be called when checking
 2571 * the link state if the link is up
2572 */
2573}
2574
2575static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2576{
2577 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2578
2579 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2580
2581 /*
2582 * This is the only place besides the function initialization
2583 * where the bp->flags can change so it is done without any
2584 * locks
2585 */
f2e0899f 2586 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2587 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2588 bp->flags |= MF_FUNC_DIS;
2589
2590 bnx2x_e1h_disable(bp);
2591 } else {
2592 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2593 bp->flags &= ~MF_FUNC_DIS;
2594
2595 bnx2x_e1h_enable(bp);
2596 }
2597 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2598 }
2599 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2600
2601 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2602 bnx2x_link_sync_notify(bp);
2603 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2604 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2605 }
2606
2607 /* Report results to MCP */
2608 if (dcc_event)
2609 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2610 else
2611 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2612}
2613
2614/* must be called under the spq lock */
2615static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2616{
2617 struct eth_spe *next_spe = bp->spq_prod_bd;
2618
2619 if (bp->spq_prod_bd == bp->spq_last_bd) {
2620 bp->spq_prod_bd = bp->spq;
2621 bp->spq_prod_idx = 0;
2622 DP(NETIF_MSG_TIMER, "end of spq\n");
2623 } else {
2624 bp->spq_prod_bd++;
2625 bp->spq_prod_idx++;
2626 }
2627 return next_spe;
2628}
2629
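/*
 * Illustrative sketch (not part of the driver): the producer wrap in
 * bnx2x_sp_get_next() - when the producer sits on the last BD it jumps back
 * to slot 0 and the index restarts.  The ring size is an assumption here.
 */
#if 0	/* standalone demo */
#include <stdio.h>

#define DEMO_SPQ_LEN 8	/* assumed ring size */

int main(void)
{
	int prod = DEMO_SPQ_LEN - 1;	/* producer on the last BD */

	prod = (prod == DEMO_SPQ_LEN - 1) ? 0 : prod + 1;
	printf("producer wrapped to slot %d\n", prod);
	return 0;
}
#endif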
2630/* must be called under the spq lock */
28912902
MC
2631static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2632{
2633 int func = BP_FUNC(bp);
2634
2635 /* Make sure that BD data is updated before writing the producer */
2636 wmb();
2637
523224a3 2638 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2639 bp->spq_prod_idx);
28912902
MC
2640 mmiowb();
2641}
2642
a2fbb9ea 2643/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2644int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2645 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2646{
28912902 2647 struct eth_spe *spe;
523224a3 2648 u16 type;
a2fbb9ea 2649
a2fbb9ea
ET
2650#ifdef BNX2X_STOP_ON_ERROR
2651 if (unlikely(bp->panic))
2652 return -EIO;
2653#endif
2654
34f80b04 2655 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2656
8fe23fbd 2657 if (!atomic_read(&bp->spq_left)) {
a2fbb9ea 2658 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2659 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2660 bnx2x_panic();
2661 return -EBUSY;
2662 }
f1410647 2663
28912902
MC
2664 spe = bnx2x_sp_get_next(bp);
2665
a2fbb9ea 2666 /* CID needs port number to be encoded int it */
28912902 2667 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2668 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2669 HW_CID(bp, cid));
523224a3 2670
a2fbb9ea 2671 if (common)
523224a3
DK
2672 /* Common ramrods:
2673 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2674 * TRAFFIC_STOP, TRAFFIC_START
2675 */
2676 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2677 & SPE_HDR_CONN_TYPE;
2678 else
2679 /* ETH ramrods: SETUP, HALT */
2680 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2681 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2682
523224a3
DK
2683 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2684 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2685
523224a3
DK
2686 spe->hdr.type = cpu_to_le16(type);
2687
2688 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2689 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2690
 2691 /* stats ramrod has its own slot on the spq */
2692 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2693 /* It's ok if the actual decrement is issued towards the memory
2694 * somewhere between the spin_lock and spin_unlock. Thus no
 2695 * more explicit memory barrier is needed.
2696 */
8fe23fbd 2697 atomic_dec(&bp->spq_left);
a2fbb9ea 2698
cdaa7cb8 2699 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3
DK
2700 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2701 "type(0x%x) left %x\n",
cdaa7cb8
VZ
2702 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2703 (u32)(U64_LO(bp->spq_mapping) +
2704 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
8fe23fbd 2705 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
cdaa7cb8 2706
28912902 2707 bnx2x_sp_prod_update(bp);
34f80b04 2708 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2709 return 0;
2710}
2711
2712/* acquire split MCP access lock register */
4a37fb66 2713static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2714{
72fd0718 2715 u32 j, val;
34f80b04 2716 int rc = 0;
a2fbb9ea
ET
2717
2718 might_sleep();
72fd0718 2719 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2720 val = (1UL << 31);
2721 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2722 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2723 if (val & (1L << 31))
2724 break;
2725
2726 msleep(5);
2727 }
a2fbb9ea 2728 if (!(val & (1L << 31))) {
19680c48 2729 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2730 rc = -EBUSY;
2731 }
2732
2733 return rc;
2734}
2735
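/*
 * Illustrative sketch (not part of the driver): the bit-31 handshake behind
 * bnx2x_acquire_alr().  The driver writes the lock bit and reads the register
 * back; the hardware arbiter only lets the bit read back as set for one
 * requester, so a clear read-back means "busy, retry".  The stand-in register
 * below always grants the lock, which real hardware of course does not.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_alr;	/* stands in for GRCBASE_MCP + 0x9c */

static int demo_try_lock(void)
{
	demo_alr |= 1u << 31;			/* request the lock */
	return !!(demo_alr & (1u << 31));	/* granted iff it reads back set */
}

int main(void)
{
	printf("ALR %s\n", demo_try_lock() ? "acquired" : "busy, retry");
	return 0;
}
#endif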
4a37fb66
YG
2736/* release split MCP access lock register */
2737static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2738{
72fd0718 2739 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2740}
2741
523224a3
DK
2742#define BNX2X_DEF_SB_ATT_IDX 0x0001
2743#define BNX2X_DEF_SB_IDX 0x0002
2744
a2fbb9ea
ET
2745static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2746{
523224a3 2747 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2748 u16 rc = 0;
2749
2750 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2751 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2752 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2753 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2754 }
523224a3
DK
2755
2756 if (bp->def_idx != def_sb->sp_sb.running_index) {
2757 bp->def_idx = def_sb->sp_sb.running_index;
2758 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2759 }
523224a3
DK
2760
 2761 /* Do not reorder: reading the indices must complete before handling */
2762 barrier();
a2fbb9ea
ET
2763 return rc;
2764}
2765
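/*
 * Illustrative sketch (not part of the driver): the change detection in
 * bnx2x_update_dsb_idx() - compare the cached indices against the ones the
 * chip wrote and return a bitmask of what needs servicing.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

#define DEMO_SB_ATT_IDX	0x0001
#define DEMO_SB_IDX	0x0002

int main(void)
{
	uint16_t cached_att = 5, hw_att = 6;	/* attention index advanced */
	uint16_t cached_sp = 9, hw_sp = 9;	/* slowpath index unchanged */
	uint16_t rc = 0;

	if (cached_att != hw_att)
		rc |= DEMO_SB_ATT_IDX;
	if (cached_sp != hw_sp)
		rc |= DEMO_SB_IDX;
	printf("work mask = 0x%x\n", rc);	/* 0x1: only attentions */
	return 0;
}
#endif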
2766/*
2767 * slow path service functions
2768 */
2769
2770static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2771{
34f80b04 2772 int port = BP_PORT(bp);
a2fbb9ea
ET
2773 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2774 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2775 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2776 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2777 u32 aeu_mask;
87942b46 2778 u32 nig_mask = 0;
f2e0899f 2779 u32 reg_addr;
a2fbb9ea 2780
a2fbb9ea
ET
2781 if (bp->attn_state & asserted)
2782 BNX2X_ERR("IGU ERROR\n");
2783
3fcaf2e5
EG
2784 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2785 aeu_mask = REG_RD(bp, aeu_addr);
2786
a2fbb9ea 2787 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2788 aeu_mask, asserted);
72fd0718 2789 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2790 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2791
3fcaf2e5
EG
2792 REG_WR(bp, aeu_addr, aeu_mask);
2793 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2794
3fcaf2e5 2795 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2796 bp->attn_state |= asserted;
3fcaf2e5 2797 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2798
2799 if (asserted & ATTN_HARD_WIRED_MASK) {
2800 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2801
a5e9a7cf
EG
2802 bnx2x_acquire_phy_lock(bp);
2803
877e9aa4 2804 /* save nig interrupt mask */
87942b46 2805 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2806 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2807
c18487ee 2808 bnx2x_link_attn(bp);
a2fbb9ea
ET
2809
2810 /* handle unicore attn? */
2811 }
2812 if (asserted & ATTN_SW_TIMER_4_FUNC)
2813 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2814
2815 if (asserted & GPIO_2_FUNC)
2816 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2817
2818 if (asserted & GPIO_3_FUNC)
2819 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2820
2821 if (asserted & GPIO_4_FUNC)
2822 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2823
2824 if (port == 0) {
2825 if (asserted & ATTN_GENERAL_ATTN_1) {
2826 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2827 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2828 }
2829 if (asserted & ATTN_GENERAL_ATTN_2) {
2830 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2831 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2832 }
2833 if (asserted & ATTN_GENERAL_ATTN_3) {
2834 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2835 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2836 }
2837 } else {
2838 if (asserted & ATTN_GENERAL_ATTN_4) {
2839 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2840 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2841 }
2842 if (asserted & ATTN_GENERAL_ATTN_5) {
2843 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2844 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2845 }
2846 if (asserted & ATTN_GENERAL_ATTN_6) {
2847 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2848 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2849 }
2850 }
2851
2852 } /* if hardwired */
2853
f2e0899f
DK
2854 if (bp->common.int_block == INT_BLOCK_HC)
2855 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2856 COMMAND_REG_ATTN_BITS_SET);
2857 else
2858 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2859
2860 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2861 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2862 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2863
2864 /* now set back the mask */
a5e9a7cf 2865 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2866 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2867 bnx2x_release_phy_lock(bp);
2868 }
a2fbb9ea
ET
2869}
2870
fd4ef40d
EG
2871static inline void bnx2x_fan_failure(struct bnx2x *bp)
2872{
2873 int port = BP_PORT(bp);
b7737c9b 2874 u32 ext_phy_config;
fd4ef40d 2875 /* mark the failure */
b7737c9b
YR
2876 ext_phy_config =
2877 SHMEM_RD(bp,
2878 dev_info.port_hw_config[port].external_phy_config);
2879
2880 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2881 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2882 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2883 ext_phy_config);
fd4ef40d
EG
2884
2885 /* log the failure */
cdaa7cb8
VZ
2886 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2887 " the driver to shutdown the card to prevent permanent"
2888 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2889}
ab6ad5a4 2890
877e9aa4 2891static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2892{
34f80b04 2893 int port = BP_PORT(bp);
877e9aa4 2894 int reg_offset;
d90d96ba 2895 u32 val;
877e9aa4 2896
34f80b04
EG
2897 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2898 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2899
34f80b04 2900 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2901
2902 val = REG_RD(bp, reg_offset);
2903 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2904 REG_WR(bp, reg_offset, val);
2905
2906 BNX2X_ERR("SPIO5 hw attention\n");
2907
fd4ef40d 2908 /* Fan failure attention */
d90d96ba 2909 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2910 bnx2x_fan_failure(bp);
877e9aa4 2911 }
34f80b04 2912
589abe3a
EG
2913 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2914 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2915 bnx2x_acquire_phy_lock(bp);
2916 bnx2x_handle_module_detect_int(&bp->link_params);
2917 bnx2x_release_phy_lock(bp);
2918 }
2919
34f80b04
EG
2920 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2921
2922 val = REG_RD(bp, reg_offset);
2923 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2924 REG_WR(bp, reg_offset, val);
2925
2926 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2927 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2928 bnx2x_panic();
2929 }
877e9aa4
ET
2930}
2931
2932static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2933{
2934 u32 val;
2935
0626b899 2936 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2937
2938 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2939 BNX2X_ERR("DB hw attention 0x%x\n", val);
2940 /* DORQ discard attention */
2941 if (val & 0x2)
2942 BNX2X_ERR("FATAL error from DORQ\n");
2943 }
34f80b04
EG
2944
2945 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2946
2947 int port = BP_PORT(bp);
2948 int reg_offset;
2949
2950 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2951 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2952
2953 val = REG_RD(bp, reg_offset);
2954 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2955 REG_WR(bp, reg_offset, val);
2956
2957 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2958 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2959 bnx2x_panic();
2960 }
877e9aa4
ET
2961}
2962
2963static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2964{
2965 u32 val;
2966
2967 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2968
2969 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2970 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2971 /* CFC error attention */
2972 if (val & 0x2)
2973 BNX2X_ERR("FATAL error from CFC\n");
2974 }
2975
2976 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2977
2978 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2979 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2980 /* RQ_USDMDP_FIFO_OVERFLOW */
2981 if (val & 0x18000)
2982 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
2983 if (CHIP_IS_E2(bp)) {
2984 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2985 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2986 }
877e9aa4 2987 }
34f80b04
EG
2988
2989 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2990
2991 int port = BP_PORT(bp);
2992 int reg_offset;
2993
2994 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2995 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2996
2997 val = REG_RD(bp, reg_offset);
2998 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2999 REG_WR(bp, reg_offset, val);
3000
3001 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3002 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3003 bnx2x_panic();
3004 }
877e9aa4
ET
3005}
3006
3007static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3008{
34f80b04
EG
3009 u32 val;
3010
877e9aa4
ET
3011 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3012
34f80b04
EG
3013 if (attn & BNX2X_PMF_LINK_ASSERT) {
3014 int func = BP_FUNC(bp);
3015
3016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3017 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3018 func_mf_config[BP_ABS_FUNC(bp)].config);
3019 val = SHMEM_RD(bp,
3020 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3021 if (val & DRV_STATUS_DCC_EVENT_MASK)
3022 bnx2x_dcc_event(bp,
3023 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3024 bnx2x__link_status_update(bp);
2691d51d 3025 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3026 bnx2x_pmf_update(bp);
3027
3028 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3029
3030 BNX2X_ERR("MC assert!\n");
3031 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3032 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3033 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3034 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3035 bnx2x_panic();
3036
3037 } else if (attn & BNX2X_MCP_ASSERT) {
3038
3039 BNX2X_ERR("MCP assert!\n");
3040 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3041 bnx2x_fw_dump(bp);
877e9aa4
ET
3042
3043 } else
3044 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3045 }
3046
3047 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3048 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3049 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3050 val = CHIP_IS_E1(bp) ? 0 :
3051 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3052 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3053 }
3054 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3055 val = CHIP_IS_E1(bp) ? 0 :
3056 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3057 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3058 }
877e9aa4 3059 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3060 }
3061}
3062
72fd0718
VZ
3063#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3064#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3065#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3066#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3067#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3068#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
f85582f8 3069
72fd0718
VZ
3070/*
3071 * should be run under rtnl lock
3072 */
3073static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3074{
3075 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3077 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078 barrier();
3079 mmiowb();
3080}
3081
3082/*
3083 * should be run under rtnl lock
3084 */
3085static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3086{
3087 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 3088 val |= (1 << RESET_DONE_FLAG_SHIFT);
3089 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3090 barrier();
3091 mmiowb();
3092}
3093
3094/*
3095 * should be run under rtnl lock
3096 */
9f6c9258 3097bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3098{
3099 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3100 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3101 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3102}
3103
3104/*
3105 * should be run under rtnl lock
3106 */
9f6c9258 3107inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3108{
3109 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3110
3111 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3112
3113 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3114 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3115 barrier();
3116 mmiowb();
3117}
3118
3119/*
3120 * should be run under rtnl lock
3121 */
9f6c9258 3122u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3123{
3124 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3125
3126 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3127
3128 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3129 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3130 barrier();
3131 mmiowb();
3132
3133 return val1;
3134}
3135
3136/*
3137 * should be run under rtnl lock
3138 */
3139static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3140{
3141 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3142}
3143
3144static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3145{
3146 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3147 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3148}
3149
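/*
 * Illustrative sketch (not part of the driver): the packing used by the
 * helpers above - the low 16 bits of one MISC register count loaded
 * functions while bit 16 flags a reset in progress.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

#define DEMO_LC_BITS	16
#define DEMO_LC_MASK	((1u << DEMO_LC_BITS) - 1)
#define DEMO_RST_FLAG	(1u << DEMO_LC_BITS)

int main(void)
{
	uint32_t reg = 0;

	reg = (reg & ~DEMO_LC_MASK) | ((reg + 1) & DEMO_LC_MASK); /* inc */
	reg |= DEMO_RST_FLAG;		/* mark reset in progress */
	printf("load count %u, reset done: %s\n", reg & DEMO_LC_MASK,
	       (reg & ~DEMO_LC_MASK) ? "no" : "yes");
	return 0;
}
#endif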
3150static inline void _print_next_block(int idx, const char *blk)
3151{
3152 if (idx)
3153 pr_cont(", ");
3154 pr_cont("%s", blk);
3155}
3156
3157static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3158{
3159 int i = 0;
3160 u32 cur_bit = 0;
3161 for (i = 0; sig; i++) {
3162 cur_bit = ((u32)0x1 << i);
3163 if (sig & cur_bit) {
3164 switch (cur_bit) {
3165 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3166 _print_next_block(par_num++, "BRB");
3167 break;
3168 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3169 _print_next_block(par_num++, "PARSER");
3170 break;
3171 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3172 _print_next_block(par_num++, "TSDM");
3173 break;
3174 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3175 _print_next_block(par_num++, "SEARCHER");
3176 break;
3177 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3178 _print_next_block(par_num++, "TSEMI");
3179 break;
3180 }
3181
3182 /* Clear the bit */
3183 sig &= ~cur_bit;
3184 }
3185 }
3186
3187 return par_num;
3188}
3189
3190static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3191{
3192 int i = 0;
3193 u32 cur_bit = 0;
3194 for (i = 0; sig; i++) {
3195 cur_bit = ((u32)0x1 << i);
3196 if (sig & cur_bit) {
3197 switch (cur_bit) {
3198 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3199 _print_next_block(par_num++, "PBCLIENT");
3200 break;
3201 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3202 _print_next_block(par_num++, "QM");
3203 break;
3204 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3205 _print_next_block(par_num++, "XSDM");
3206 break;
3207 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3208 _print_next_block(par_num++, "XSEMI");
3209 break;
3210 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3211 _print_next_block(par_num++, "DOORBELLQ");
3212 break;
3213 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3214 _print_next_block(par_num++, "VAUX PCI CORE");
3215 break;
3216 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3217 _print_next_block(par_num++, "DEBUG");
3218 break;
3219 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3220 _print_next_block(par_num++, "USDM");
3221 break;
3222 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3223 _print_next_block(par_num++, "USEMI");
3224 break;
3225 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3226 _print_next_block(par_num++, "UPB");
3227 break;
3228 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3229 _print_next_block(par_num++, "CSDM");
3230 break;
3231 }
3232
3233 /* Clear the bit */
3234 sig &= ~cur_bit;
3235 }
3236 }
3237
3238 return par_num;
3239}
3240
3241static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3242{
3243 int i = 0;
3244 u32 cur_bit = 0;
3245 for (i = 0; sig; i++) {
3246 cur_bit = ((u32)0x1 << i);
3247 if (sig & cur_bit) {
3248 switch (cur_bit) {
3249 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3250 _print_next_block(par_num++, "CSEMI");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3253 _print_next_block(par_num++, "PXP");
3254 break;
3255 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3256 _print_next_block(par_num++,
3257 "PXPPCICLOCKCLIENT");
3258 break;
3259 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3260 _print_next_block(par_num++, "CFC");
3261 break;
3262 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3263 _print_next_block(par_num++, "CDU");
3264 break;
3265 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3266 _print_next_block(par_num++, "IGU");
3267 break;
3268 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3269 _print_next_block(par_num++, "MISC");
3270 break;
3271 }
3272
3273 /* Clear the bit */
3274 sig &= ~cur_bit;
3275 }
3276 }
3277
3278 return par_num;
3279}
3280
3281static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3282{
3283 int i = 0;
3284 u32 cur_bit = 0;
3285 for (i = 0; sig; i++) {
3286 cur_bit = ((u32)0x1 << i);
3287 if (sig & cur_bit) {
3288 switch (cur_bit) {
3289 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3290 _print_next_block(par_num++, "MCP ROM");
3291 break;
3292 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3293 _print_next_block(par_num++, "MCP UMP RX");
3294 break;
3295 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3296 _print_next_block(par_num++, "MCP UMP TX");
3297 break;
3298 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3299 _print_next_block(par_num++, "MCP SCPAD");
3300 break;
3301 }
3302
3303 /* Clear the bit */
3304 sig &= ~cur_bit;
3305 }
3306 }
3307
3308 return par_num;
3309}
3310
3311static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3312 u32 sig2, u32 sig3)
3313{
3314 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3315 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3316 int par_num = 0;
3317 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3318 "[0]:0x%08x [1]:0x%08x "
3319 "[2]:0x%08x [3]:0x%08x\n",
3320 sig0 & HW_PRTY_ASSERT_SET_0,
3321 sig1 & HW_PRTY_ASSERT_SET_1,
3322 sig2 & HW_PRTY_ASSERT_SET_2,
3323 sig3 & HW_PRTY_ASSERT_SET_3);
3324 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3325 bp->dev->name);
3326 par_num = bnx2x_print_blocks_with_parity0(
3327 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3328 par_num = bnx2x_print_blocks_with_parity1(
3329 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3330 par_num = bnx2x_print_blocks_with_parity2(
3331 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3332 par_num = bnx2x_print_blocks_with_parity3(
3333 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3334 printk("\n");
3335 return true;
3336 } else
3337 return false;
3338}
3339
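/*
 * Illustrative sketch (not part of the driver): the "walk the set bits, name
 * each block" pattern shared by the bnx2x_print_blocks_with_parity*()
 * helpers.  The block names here are a demo subset, not the full AEU map.
 */
#if 0	/* standalone demo */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const char *names[3] = { "BRB", "PARSER", "TSDM" };
	uint32_t sig = 0x5;	/* bits 0 and 2 set */
	int i, par_num = 0;

	for (i = 0; sig && i < 3; i++) {
		uint32_t cur_bit = 1u << i;

		if (sig & cur_bit) {
			printf("%s%s", par_num++ ? ", " : "", names[i]);
			sig &= ~cur_bit;	/* clear the bit */
		}
	}
	printf("\n");	/* prints "BRB, TSDM" */
	return 0;
}
#endif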
9f6c9258 3340bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3341{
a2fbb9ea 3342 struct attn_route attn;
72fd0718
VZ
3343 int port = BP_PORT(bp);
3344
3345 attn.sig[0] = REG_RD(bp,
3346 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3347 port*4);
3348 attn.sig[1] = REG_RD(bp,
3349 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3350 port*4);
3351 attn.sig[2] = REG_RD(bp,
3352 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3353 port*4);
3354 attn.sig[3] = REG_RD(bp,
3355 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3356 port*4);
3357
3358 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3359 attn.sig[3]);
3360}
3361
f2e0899f
DK
3362
3363static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3364{
3365 u32 val;
3366 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3367
3368 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3369 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3370 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3371 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3372 "ADDRESS_ERROR\n");
3373 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3374 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3375 "INCORRECT_RCV_BEHAVIOR\n");
3376 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3377 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3378 "WAS_ERROR_ATTN\n");
3379 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3380 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3381 "VF_LENGTH_VIOLATION_ATTN\n");
3382 if (val &
3383 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3384 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3385 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3386 if (val &
3387 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3388 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3389 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3390 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3391 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3392 "TCPL_ERROR_ATTN\n");
3393 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3394 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3395 "TCPL_IN_TWO_RCBS_ATTN\n");
3396 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3397 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3398 "CSSNOOP_FIFO_OVERFLOW\n");
3399 }
3400 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3401 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3402 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3403 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3405 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3406 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3407 "_ATC_TCPL_TO_NOT_PEND\n");
3408 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3409 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3410 "ATC_GPA_MULTIPLE_HITS\n");
3411 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3412 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3413 "ATC_RCPL_TO_EMPTY_CNT\n");
3414 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3415 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3416 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3417 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3418 "ATC_IREQ_LESS_THAN_STU\n");
3419 }
3420
3421 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3422 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3423 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3424 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3425 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3426 }
3427
3428}
3429
72fd0718
VZ
3430static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3431{
3432 struct attn_route attn, *group_mask;
34f80b04 3433 int port = BP_PORT(bp);
877e9aa4 3434 int index;
a2fbb9ea
ET
3435 u32 reg_addr;
3436 u32 val;
3fcaf2e5 3437 u32 aeu_mask;
a2fbb9ea
ET
3438
3439 /* need to take HW lock because MCP or other port might also
3440 try to handle this event */
4a37fb66 3441 bnx2x_acquire_alr(bp);
a2fbb9ea 3442
72fd0718
VZ
3443 if (bnx2x_chk_parity_attn(bp)) {
3444 bp->recovery_state = BNX2X_RECOVERY_INIT;
3445 bnx2x_set_reset_in_progress(bp);
3446 schedule_delayed_work(&bp->reset_task, 0);
3447 /* Disable HW interrupts */
3448 bnx2x_int_disable(bp);
3449 bnx2x_release_alr(bp);
3450 /* In case of parity errors don't handle attentions so that
3451 * the other function can also "see" the parity errors.
3452 */
3453 return;
3454 }
3455
a2fbb9ea
ET
3456 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3457 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3458 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3459 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
f2e0899f
DK
3460 if (CHIP_IS_E2(bp))
3461 attn.sig[4] =
3462 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3463 else
3464 attn.sig[4] = 0;
3465
3466 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3467 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
3468
3469 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3470 if (deasserted & (1 << index)) {
72fd0718 3471 group_mask = &bp->attn_group[index];
a2fbb9ea 3472
f2e0899f
DK
3473 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3474 "%08x %08x %08x\n",
3475 index,
3476 group_mask->sig[0], group_mask->sig[1],
3477 group_mask->sig[2], group_mask->sig[3],
3478 group_mask->sig[4]);
a2fbb9ea 3479
f2e0899f
DK
3480 bnx2x_attn_int_deasserted4(bp,
3481 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3482 bnx2x_attn_int_deasserted3(bp,
72fd0718 3483 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3484 bnx2x_attn_int_deasserted1(bp,
72fd0718 3485 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3486 bnx2x_attn_int_deasserted2(bp,
72fd0718 3487 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3488 bnx2x_attn_int_deasserted0(bp,
72fd0718 3489 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3490 }
3491 }
3492
4a37fb66 3493 bnx2x_release_alr(bp);
a2fbb9ea 3494
f2e0899f
DK
3495 if (bp->common.int_block == INT_BLOCK_HC)
3496 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3497 COMMAND_REG_ATTN_BITS_CLR);
3498 else
3499 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
3500
3501 val = ~deasserted;
f2e0899f
DK
3502 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3503 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3504 REG_WR(bp, reg_addr, val);
a2fbb9ea 3505
a2fbb9ea 3506 if (~bp->attn_state & deasserted)
3fcaf2e5 3507 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3508
3509 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3510 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3511
3fcaf2e5
EG
3512 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3513 aeu_mask = REG_RD(bp, reg_addr);
3514
3515 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3516 aeu_mask, deasserted);
72fd0718 3517 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3518 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3519
3fcaf2e5
EG
3520 REG_WR(bp, reg_addr, aeu_mask);
3521 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3522
3523 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3524 bp->attn_state &= ~deasserted;
3525 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3526}
3527
3528static void bnx2x_attn_int(struct bnx2x *bp)
3529{
3530 /* read local copy of bits */
68d59484
EG
3531 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3532 attn_bits);
3533 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3534 attn_bits_ack);
a2fbb9ea
ET
3535 u32 attn_state = bp->attn_state;
3536
3537 /* look for changed bits */
3538 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3539 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3540
3541 DP(NETIF_MSG_HW,
3542 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3543 attn_bits, attn_ack, asserted, deasserted);
3544
3545 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3546 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3547
3548 /* handle bits that were raised */
3549 if (asserted)
3550 bnx2x_attn_int_asserted(bp, asserted);
3551
3552 if (deasserted)
3553 bnx2x_attn_int_deasserted(bp, deasserted);
3554}
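
The asserted/deasserted computation in bnx2x_attn_int() is a classic edge detector over three bit vectors: a bit is newly asserted when it is set in attn_bits but not in the ack or in the tracked state, and newly deasserted in the mirror case. A standalone sketch of the arithmetic with hypothetical bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical snapshot: bit0 newly raised, bit1 being lowered */
	uint32_t attn_bits  = 0x1;	/* bit0 set by HW */
	uint32_t attn_ack   = 0x2;	/* bit1 was acked earlier */
	uint32_t attn_state = 0x2;	/* bit1 currently tracked as asserted */

	uint32_t asserted   = attn_bits & ~attn_ack & ~attn_state;
	uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

	printf("asserted %#x deasserted %#x\n", asserted, deasserted);
	/* prints: asserted 0x1 deasserted 0x2 */
	return 0;
}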
3555
523224a3
DK
3556static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3557{
3558 /* No memory barriers */
3559 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3560 mmiowb(); /* keep prod updates ordered */
3561}
3562
3563#ifdef BCM_CNIC
3564static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3565 union event_ring_elem *elem)
3566{
3567 if (!bp->cnic_eth_dev.starting_cid ||
3568 cid < bp->cnic_eth_dev.starting_cid)
3569 return 1;
3570
3571 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3572
3573 if (unlikely(elem->message.data.cfc_del_event.error)) {
3574 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3575 cid);
3576 bnx2x_panic_dump(bp);
3577 }
3578 bnx2x_cnic_cfc_comp(bp, cid);
3579 return 0;
3580}
3581#endif
3582
3583static void bnx2x_eq_int(struct bnx2x *bp)
3584{
3585 u16 hw_cons, sw_cons, sw_prod;
3586 union event_ring_elem *elem;
3587 u32 cid;
3588 u8 opcode;
3589 int spqe_cnt = 0;
3590
3591 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3592
3593 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3594 * When we get the next-page we need to adjust so the loop
3595 * condition below will be met. The next element is the size of a
3596 * regular element and hence incrementing by 1 is enough.
3597 */
3598 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3599 hw_cons++;
3600
3601 /* This function may never run in parallel with itself for a
3602 * specific bp, thus there is no need for a "paired" read memory
3603 * barrier here.
3604 */
3605 sw_cons = bp->eq_cons;
3606 sw_prod = bp->eq_prod;
3607
3608 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
8fe23fbd 3609 hw_cons, sw_cons, atomic_read(&bp->spq_left));
523224a3
DK
3610
3611 for (; sw_cons != hw_cons;
3612 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3613
3614
3615 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3616
3617 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3618 opcode = elem->message.opcode;
3619
3620
3621 /* handle eq element */
3622 switch (opcode) {
3623 case EVENT_RING_OPCODE_STAT_QUERY:
3624 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3625 /* nothing to do with stats comp */
3626 continue;
3627
3628 case EVENT_RING_OPCODE_CFC_DEL:
3629 /* handle according to cid range */
3630 /*
3631 * we may want to verify here that the bp state is
3632 * HALTING
3633 */
3634 DP(NETIF_MSG_IFDOWN,
3635 "got delete ramrod for MULTI[%d]\n", cid);
3636#ifdef BCM_CNIC
3637 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3638 goto next_spqe;
3639#endif
3640 bnx2x_fp(bp, cid, state) =
3641 BNX2X_FP_STATE_CLOSED;
3642
3643 goto next_spqe;
3644 }
3645
3646 switch (opcode | bp->state) {
3647 case (EVENT_RING_OPCODE_FUNCTION_START |
3648 BNX2X_STATE_OPENING_WAIT4_PORT):
3649 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3650 bp->state = BNX2X_STATE_FUNC_STARTED;
3651 break;
3652
3653 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3654 BNX2X_STATE_CLOSING_WAIT4_HALT):
3655 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3656 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3657 break;
3658
3659 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3660 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3661 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3662 bp->set_mac_pending = 0;
3663 break;
3664
3665 case (EVENT_RING_OPCODE_SET_MAC |
3666 BNX2X_STATE_CLOSING_WAIT4_HALT):
3667 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3668 bp->set_mac_pending = 0;
3669 break;
3670 default:
3671 /* unknown event: log the error and continue */
3672 BNX2X_ERR("Unknown EQ event %d\n",
3673 elem->message.opcode);
3674 }
3675next_spqe:
3676 spqe_cnt++;
3677 } /* for */
3678
8fe23fbd
DK
3679 smp_mb__before_atomic_inc();
3680 atomic_add(spqe_cnt, &bp->spq_left);
523224a3
DK
3681
3682 bp->eq_cons = sw_cons;
3683 bp->eq_prod = sw_prod;
3684 /* Make sure the above memory writes complete before updating the producer */
3685 smp_wmb();
3686
3687 /* update producer */
3688 bnx2x_update_eq_prod(bp, bp->eq_prod);
3689}
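
The hw_cons adjustment at the top of bnx2x_eq_int() reflects a ring layout where the last slot of every page is a next-page pointer rather than a usable descriptor, so consumer indices must skip over it. A small sketch of that skip, assuming a hypothetical 256-slot page whose slot 255 is the link (matching the 0-254, 256 usable range described in the comment above; NEXT_EQ_IDX itself is defined elsewhere in the driver):

#include <stdint.h>
#include <stdio.h>

#define DESC_CNT_PAGE	256			/* assumed slots per page */
#define DESC_MAX_PAGE	(DESC_CNT_PAGE - 1)	/* 255: the link slot */

static uint16_t next_idx(uint16_t idx)
{
	/* skip the next-page link element at the end of every page */
	return ((idx & DESC_MAX_PAGE) == DESC_MAX_PAGE - 1) ? idx + 2 : idx + 1;
}

int main(void)
{
	uint16_t idx = 253;
	int i;

	for (i = 0; i < 4; i++) {
		printf("%u -> ", idx);
		idx = next_idx(idx);
	}
	printf("%u\n", idx);	/* 253 -> 254 -> 256 -> 257 -> 258 */
	return 0;
}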
3690
a2fbb9ea
ET
3691static void bnx2x_sp_task(struct work_struct *work)
3692{
1cf167f2 3693 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3694 u16 status;
3695
3696 /* Return here if interrupt is disabled */
3697 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3698 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3699 return;
3700 }
3701
3702 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3703/* if (status == 0) */
3704/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3705
cdaa7cb8 3706 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3707
877e9aa4 3708 /* HW attentions */
523224a3 3709 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3710 bnx2x_attn_int(bp);
523224a3 3711 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3712 }
3713
523224a3
DK
3714 /* SP events: STAT_QUERY and others */
3715 if (status & BNX2X_DEF_SB_IDX) {
3716
3717 /* Handle EQ completions */
3718 bnx2x_eq_int(bp);
3719
3720 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3721 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3722
3723 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3724 }
3725
3726 if (unlikely(status))
3727 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3728 status);
a2fbb9ea 3729
523224a3
DK
3730 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3731 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3732}
3733
9f6c9258 3734irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3735{
3736 struct net_device *dev = dev_instance;
3737 struct bnx2x *bp = netdev_priv(dev);
3738
3739 /* Return here if interrupt is disabled */
3740 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3741 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3742 return IRQ_HANDLED;
3743 }
3744
523224a3
DK
3745 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3746 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3747
3748#ifdef BNX2X_STOP_ON_ERROR
3749 if (unlikely(bp->panic))
3750 return IRQ_HANDLED;
3751#endif
3752
993ac7b5
MC
3753#ifdef BCM_CNIC
3754 {
3755 struct cnic_ops *c_ops;
3756
3757 rcu_read_lock();
3758 c_ops = rcu_dereference(bp->cnic_ops);
3759 if (c_ops)
3760 c_ops->cnic_handler(bp->cnic_data, NULL);
3761 rcu_read_unlock();
3762 }
3763#endif
1cf167f2 3764 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3765
3766 return IRQ_HANDLED;
3767}
3768
3769/* end of slow path */
3770
a2fbb9ea
ET
3771static void bnx2x_timer(unsigned long data)
3772{
3773 struct bnx2x *bp = (struct bnx2x *) data;
3774
3775 if (!netif_running(bp->dev))
3776 return;
3777
3778 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3779 goto timer_restart;
a2fbb9ea
ET
3780
3781 if (poll) {
3782 struct bnx2x_fastpath *fp = &bp->fp[0];
3783 int rc;
3784
7961f791 3785 bnx2x_tx_int(fp);
a2fbb9ea
ET
3786 rc = bnx2x_rx_int(fp, 1000);
3787 }
3788
34f80b04 3789 if (!BP_NOMCP(bp)) {
f2e0899f 3790 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
3791 u32 drv_pulse;
3792 u32 mcp_pulse;
3793
3794 ++bp->fw_drv_pulse_wr_seq;
3795 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3796 /* TBD - add SYSTEM_TIME */
3797 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3798 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3799
f2e0899f 3800 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
3801 MCP_PULSE_SEQ_MASK);
3802 /* The delta between driver pulse and mcp response
3803 * should be 1 (before mcp response) or 0 (after mcp response)
3804 */
3805 if ((drv_pulse != mcp_pulse) &&
3806 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3807 /* someone lost a heartbeat... */
3808 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3809 drv_pulse, mcp_pulse);
3810 }
3811 }
3812
f34d28ea 3813 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3814 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3815
f1410647 3816timer_restart:
a2fbb9ea
ET
3817 mod_timer(&bp->timer, jiffies + bp->current_interval);
3818}
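
The heartbeat check in bnx2x_timer() tolerates exactly one step of lag between the driver's pulse sequence and the MCP's echo, with wrap-around handled by masking. A standalone sketch of the same comparison (the mask value 0x7fff below is assumed for illustration only; the driver uses DRV_PULSE_SEQ_MASK / MCP_PULSE_SEQ_MASK):

#include <stdint.h>
#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fff	/* illustrative mask, wraps the counter */

static int pulse_ok(uint16_t drv_pulse, uint16_t mcp_pulse)
{
	/* OK if MCP already echoed us (delta 0) or is one step behind */
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & PULSE_SEQ_MASK));
}

int main(void)
{
	printf("%d %d %d\n",
	       pulse_ok(10, 10),		/* 1: in sync */
	       pulse_ok(10, 9),			/* 1: MCP one step behind */
	       pulse_ok(0, PULSE_SEQ_MASK));	/* 1: wrap-around case */
	return 0;
}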
3819
3820/* end of Statistics */
3821
3822/* nic init */
3823
3824/*
3825 * nic init service functions
3826 */
3827
523224a3 3828static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3829{
523224a3
DK
3830 u32 i;
3831 if (!(len%4) && !(addr%4))
3832 for (i = 0; i < len; i += 4)
3833 REG_WR(bp, addr + i, fill);
3834 else
3835 for (i = 0; i < len; i++)
3836 REG_WR8(bp, addr + i, fill);
34f80b04 3837
34f80b04
EG
3838}
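
bnx2x_fill() takes the 32-bit write path only when both the start address and the length are dword-aligned, and falls back to byte writes otherwise. The same alignment test in a standalone sketch:

#include <stdio.h>

static const char *fill_mode(unsigned addr, unsigned len)
{
	/* dword path only if both start and length are 4-byte aligned */
	return (!(len % 4) && !(addr % 4)) ? "dword" : "byte";
}

int main(void)
{
	printf("%s %s %s\n",
	       fill_mode(0x1000, 64),	/* dword */
	       fill_mode(0x1002, 64),	/* byte: misaligned start */
	       fill_mode(0x1000, 63));	/* byte: ragged length */
	return 0;
}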
3839
523224a3
DK
3840/* helper: writes FP SP data to FW - data_size in dwords */
3841static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3842 int fw_sb_id,
3843 u32 *sb_data_p,
3844 u32 data_size)
34f80b04 3845{
a2fbb9ea 3846 int index;
523224a3
DK
3847 for (index = 0; index < data_size; index++)
3848 REG_WR(bp, BAR_CSTRORM_INTMEM +
3849 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3850 sizeof(u32)*index,
3851 *(sb_data_p + index));
3852}
a2fbb9ea 3853
523224a3
DK
3854static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3855{
3856 u32 *sb_data_p;
3857 u32 data_size = 0;
f2e0899f 3858 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3859 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3860
523224a3 3861 /* disable the function first */
f2e0899f
DK
3862 if (CHIP_IS_E2(bp)) {
3863 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3864 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3865 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3866 sb_data_e2.common.p_func.vf_valid = false;
3867 sb_data_p = (u32 *)&sb_data_e2;
3868 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3869 } else {
3870 memset(&sb_data_e1x, 0,
3871 sizeof(struct hc_status_block_data_e1x));
3872 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3873 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3874 sb_data_e1x.common.p_func.vf_valid = false;
3875 sb_data_p = (u32 *)&sb_data_e1x;
3876 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3877 }
523224a3 3878 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 3879
523224a3
DK
3880 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3881 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3882 CSTORM_STATUS_BLOCK_SIZE);
3883 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3884 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3885 CSTORM_SYNC_BLOCK_SIZE);
3886}
34f80b04 3887
523224a3
DK
3888/* helper: writes SP SB data to FW */
3889static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3890 struct hc_sp_status_block_data *sp_sb_data)
3891{
3892 int func = BP_FUNC(bp);
3893 int i;
3894 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3895 REG_WR(bp, BAR_CSTRORM_INTMEM +
3896 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3897 i*sizeof(u32),
3898 *((u32 *)sp_sb_data + i));
34f80b04
EG
3899}
3900
523224a3 3901static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
3902{
3903 int func = BP_FUNC(bp);
523224a3
DK
3904 struct hc_sp_status_block_data sp_sb_data;
3905 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 3906
523224a3
DK
3907 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3908 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3909 sp_sb_data.p_func.vf_valid = false;
3910
3911 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3912
3913 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3914 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3915 CSTORM_SP_STATUS_BLOCK_SIZE);
3916 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3917 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3918 CSTORM_SP_SYNC_BLOCK_SIZE);
3919
3920}
3921
3922
3923static inline
3924void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3925 int igu_sb_id, int igu_seg_id)
3926{
3927 hc_sm->igu_sb_id = igu_sb_id;
3928 hc_sm->igu_seg_id = igu_seg_id;
3929 hc_sm->timer_value = 0xFF;
3930 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
3931}
3932
523224a3
DK
3933void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3934 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 3935{
523224a3
DK
3936 int igu_seg_id;
3937
f2e0899f 3938 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
3939 struct hc_status_block_data_e1x sb_data_e1x;
3940 struct hc_status_block_sm *hc_sm_p;
3941 struct hc_index_data *hc_index_p;
3942 int data_size;
3943 u32 *sb_data_p;
3944
f2e0899f
DK
3945 if (CHIP_INT_MODE_IS_BC(bp))
3946 igu_seg_id = HC_SEG_ACCESS_NORM;
3947 else
3948 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
3949
3950 bnx2x_zero_fp_sb(bp, fw_sb_id);
3951
f2e0899f
DK
3952 if (CHIP_IS_E2(bp)) {
3953 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3954 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3955 sb_data_e2.common.p_func.vf_id = vfid;
3956 sb_data_e2.common.p_func.vf_valid = vf_valid;
3957 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3958 sb_data_e2.common.same_igu_sb_1b = true;
3959 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3960 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3961 hc_sm_p = sb_data_e2.common.state_machine;
3962 hc_index_p = sb_data_e2.index_data;
3963 sb_data_p = (u32 *)&sb_data_e2;
3964 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3965 } else {
3966 memset(&sb_data_e1x, 0,
3967 sizeof(struct hc_status_block_data_e1x));
3968 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3969 sb_data_e1x.common.p_func.vf_id = 0xff;
3970 sb_data_e1x.common.p_func.vf_valid = false;
3971 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3972 sb_data_e1x.common.same_igu_sb_1b = true;
3973 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3974 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3975 hc_sm_p = sb_data_e1x.common.state_machine;
3976 hc_index_p = sb_data_e1x.index_data;
3977 sb_data_p = (u32 *)&sb_data_e1x;
3978 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3979 }
523224a3
DK
3980
3981 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3982 igu_sb_id, igu_seg_id);
3983 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3984 igu_sb_id, igu_seg_id);
3985
3986 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3987
3988 /* write indices to HW */
3989 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3990}
3991
3992static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3993 u8 sb_index, u8 disable, u16 usec)
3994{
3995 int port = BP_PORT(bp);
3996 u8 ticks = usec / BNX2X_BTR;
3997
3998 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3999
4000 disable = disable ? 1 : (usec ? 0 : 1);
4001 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4002}
4003
4004static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4005 u16 tx_usec, u16 rx_usec)
4006{
4007 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4008 false, rx_usec);
4009 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4010 false, tx_usec);
4011}
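
The coalescing helpers convert a microsecond budget into timer ticks of BNX2X_BTR microseconds each, and treat a zero budget as an implicit disable on top of any explicit disable flag. A hedged arithmetic sketch (the tick granularity of 4 below is assumed purely for illustration; the real BNX2X_BTR is defined elsewhere):

#include <stdint.h>
#include <stdio.h>

#define BTR_USEC 4	/* assumed tick granularity, illustration only */

int main(void)
{
	uint16_t usec = 50;
	int explicit_off = 0;

	uint8_t ticks = usec / BTR_USEC;		/* 12 ticks of 4us */
	/* disabled if asked explicitly, or implicitly by a 0us budget */
	int disable = explicit_off ? 1 : (usec ? 0 : 1);

	printf("ticks %u disable %d\n", ticks, disable);
	return 0;
}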
f2e0899f 4012
523224a3
DK
4013static void bnx2x_init_def_sb(struct bnx2x *bp)
4014{
4015 struct host_sp_status_block *def_sb = bp->def_status_blk;
4016 dma_addr_t mapping = bp->def_status_blk_mapping;
4017 int igu_sp_sb_index;
4018 int igu_seg_id;
34f80b04
EG
4019 int port = BP_PORT(bp);
4020 int func = BP_FUNC(bp);
523224a3 4021 int reg_offset;
a2fbb9ea 4022 u64 section;
523224a3
DK
4023 int index;
4024 struct hc_sp_status_block_data sp_sb_data;
4025 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4026
f2e0899f
DK
4027 if (CHIP_INT_MODE_IS_BC(bp)) {
4028 igu_sp_sb_index = DEF_SB_IGU_ID;
4029 igu_seg_id = HC_SEG_ACCESS_DEF;
4030 } else {
4031 igu_sp_sb_index = bp->igu_dsb_id;
4032 igu_seg_id = IGU_SEG_ACCESS_DEF;
4033 }
a2fbb9ea
ET
4034
4035 /* ATTN */
523224a3 4036 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4037 atten_status_block);
523224a3 4038 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4039
49d66772
ET
4040 bp->attn_state = 0;
4041
a2fbb9ea
ET
4042 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4043 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4044 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
4045 int sindex;
4046 /* take care of sig[0]..sig[4] */
4047 for (sindex = 0; sindex < 4; sindex++)
4048 bp->attn_group[index].sig[sindex] =
4049 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f
DK
4050
4051 if (CHIP_IS_E2(bp))
4052 /*
4053 * enable5 is separate from the rest of the registers,
4054 * and therefore the address skip is 4
4055 * and not 16 between the different groups
4056 */
4057 bp->attn_group[index].sig[4] = REG_RD(bp,
4058 reg_offset + 0x10 + 0x4*index);
4059 else
4060 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
4061 }
4062
f2e0899f
DK
4063 if (bp->common.int_block == INT_BLOCK_HC) {
4064 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4065 HC_REG_ATTN_MSG0_ADDR_L);
4066
4067 REG_WR(bp, reg_offset, U64_LO(section));
4068 REG_WR(bp, reg_offset + 4, U64_HI(section));
4069 } else if (CHIP_IS_E2(bp)) {
4070 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4071 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4072 }
a2fbb9ea 4073
523224a3
DK
4074 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4075 sp_sb);
a2fbb9ea 4076
523224a3 4077 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4078
523224a3
DK
4079 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4080 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4081 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4082 sp_sb_data.igu_seg_id = igu_seg_id;
4083 sp_sb_data.p_func.pf_id = func;
f2e0899f 4084 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4085 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4086
523224a3 4087 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4088
bb2a0f7a 4089 bp->stats_pending = 0;
66e855f3 4090 bp->set_mac_pending = 0;
bb2a0f7a 4091
523224a3 4092 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4093}
4094
9f6c9258 4095void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4096{
a2fbb9ea
ET
4097 int i;
4098
523224a3
DK
4099 for_each_queue(bp, i)
4100 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4101 bp->rx_ticks, bp->tx_ticks);
a2fbb9ea
ET
4102}
4103
a2fbb9ea
ET
4104static void bnx2x_init_sp_ring(struct bnx2x *bp)
4105{
a2fbb9ea 4106 spin_lock_init(&bp->spq_lock);
8fe23fbd 4107 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4108
a2fbb9ea 4109 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4110 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4111 bp->spq_prod_bd = bp->spq;
4112 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
4113}
4114
523224a3 4115static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
4116{
4117 int i;
523224a3
DK
4118 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4119 union event_ring_elem *elem =
4120 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4121
523224a3
DK
4122 elem->next_page.addr.hi =
4123 cpu_to_le32(U64_HI(bp->eq_mapping +
4124 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4125 elem->next_page.addr.lo =
4126 cpu_to_le32(U64_LO(bp->eq_mapping +
4127 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4128 }
523224a3
DK
4129 bp->eq_cons = 0;
4130 bp->eq_prod = NUM_EQ_DESC;
4131 bp->eq_cons_sb = BNX2X_EQ_INDEX;
a2fbb9ea
ET
4132}
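
bnx2x_init_eq_ring() writes, into the last element of page i, the DMA address of page (i % NUM_EQ_PAGES): each page links to the next one and the final page wraps back to page 0. A small sketch of that wrap arithmetic with assumed page count and base address:

#include <stdio.h>

#define NUM_PAGES	2	/* assumed page count, for illustration */
#define PAGE_SIZE_B	4096

int main(void)
{
	unsigned long base = 0x100000;	/* hypothetical DMA base address */
	int i;

	/* last element of page i (1..NUM_PAGES) links to page (i % NUM_PAGES) */
	for (i = 1; i <= NUM_PAGES; i++)
		printf("page %d links to %#lx\n",
		       i, base + PAGE_SIZE_B * (i % NUM_PAGES));
	/* page 1 -> 0x101000 (second page), page 2 -> 0x100000 (wrap to first) */
	return 0;
}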
4133
4134static void bnx2x_init_ind_table(struct bnx2x *bp)
4135{
26c8fa4d 4136 int func = BP_FUNC(bp);
a2fbb9ea
ET
4137 int i;
4138
555f6c78 4139 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4140 return;
4141
555f6c78
EG
4142 DP(NETIF_MSG_IFUP,
4143 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4144 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4145 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4146 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 4147 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
4148}
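
The indirection table init simply round-robins the table slots over the active queues, offset by the base client id. A toy sketch of the same spreading (table truncated to 8 slots for display; the base client id is hypothetical):

#include <stdio.h>

#define TABLE_SIZE	8	/* the real table is larger; truncated here */

int main(void)
{
	int base_cl_id = 16;	/* hypothetical base client id */
	int num_queues = 3;
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		printf("slot %d -> client %d\n",
		       i, base_cl_id + (i % num_queues));
	/* clients 16,17,18,16,17,18,... */
	return 0;
}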
4149
9f6c9258 4150void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4151{
34f80b04 4152 int mode = bp->rx_mode;
523224a3
DK
4153 u16 cl_id;
4154
581ce43d
EG
4155 /* All but management unicast packets should pass to the host as well */
4156 u32 llh_mask =
4157 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4158 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4159 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4160 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4161
a2fbb9ea
ET
4162 switch (mode) {
4163 case BNX2X_RX_MODE_NONE: /* no Rx */
523224a3
DK
4164 cl_id = BP_L_ID(bp);
4165 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
a2fbb9ea 4166 break;
356e2385 4167
a2fbb9ea 4168 case BNX2X_RX_MODE_NORMAL:
523224a3
DK
4169 cl_id = BP_L_ID(bp);
4170 bnx2x_rxq_set_mac_filters(bp, cl_id,
4171 BNX2X_ACCEPT_UNICAST |
4172 BNX2X_ACCEPT_BROADCAST |
4173 BNX2X_ACCEPT_MULTICAST);
a2fbb9ea 4174 break;
356e2385 4175
a2fbb9ea 4176 case BNX2X_RX_MODE_ALLMULTI:
523224a3
DK
4177 cl_id = BP_L_ID(bp);
4178 bnx2x_rxq_set_mac_filters(bp, cl_id,
4179 BNX2X_ACCEPT_UNICAST |
4180 BNX2X_ACCEPT_BROADCAST |
4181 BNX2X_ACCEPT_ALL_MULTICAST);
a2fbb9ea 4182 break;
356e2385 4183
a2fbb9ea 4184 case BNX2X_RX_MODE_PROMISC:
523224a3
DK
4185 cl_id = BP_L_ID(bp);
4186 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4187
581ce43d
EG
4188 /* pass management unicast packets as well */
4189 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4190 break;
356e2385 4191
a2fbb9ea 4192 default:
34f80b04
EG
4193 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4194 break;
a2fbb9ea
ET
4195 }
4196
581ce43d 4197 REG_WR(bp,
523224a3
DK
4198 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4199 NIG_REG_LLH0_BRB1_DRV_MASK,
581ce43d
EG
4200 llh_mask);
4201
523224a3
DK
4202 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4203 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4204 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4205 bp->mac_filters.ucast_drop_all,
4206 bp->mac_filters.mcast_drop_all,
4207 bp->mac_filters.bcast_drop_all,
4208 bp->mac_filters.ucast_accept_all,
4209 bp->mac_filters.mcast_accept_all,
4210 bp->mac_filters.bcast_accept_all
4211 );
a2fbb9ea 4212
523224a3 4213 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
a2fbb9ea
ET
4214}
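
The rx-mode switch above boils down to a mode-to-accept-flags mapping plus one special case: promiscuous mode additionally opens the NIG unicast gate via llh_mask. A condensed sketch of that mapping (the flag encoding below is illustrative, not the driver's real BNX2X_ACCEPT_* values):

#include <stdio.h>

/* illustrative flag encoding, not the driver's real values */
#define ACCEPT_NONE   0x0
#define ACCEPT_UCAST  0x1
#define ACCEPT_BCAST  0x2
#define ACCEPT_MCAST  0x4
#define ACCEPT_ALLMC  0x8

enum rx_mode { RX_NONE, RX_NORMAL, RX_ALLMULTI, RX_PROMISC };

static unsigned accept_flags(enum rx_mode mode)
{
	switch (mode) {
	case RX_NORMAL:   return ACCEPT_UCAST | ACCEPT_BCAST | ACCEPT_MCAST;
	case RX_ALLMULTI: return ACCEPT_UCAST | ACCEPT_BCAST | ACCEPT_ALLMC;
	case RX_PROMISC:  return ~0u;	/* accept everything */
	default:          return ACCEPT_NONE;
	}
}

int main(void)
{
	printf("normal %#x allmulti %#x\n",
	       accept_flags(RX_NORMAL), accept_flags(RX_ALLMULTI));
	return 0;
}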
4215
471de716
EG
4216static void bnx2x_init_internal_common(struct bnx2x *bp)
4217{
4218 int i;
4219
523224a3 4220 if (!CHIP_IS_E1(bp)) {
de832a55 4221
523224a3
DK
4222 /* xstorm needs to know whether to add ovlan to packets or not;
4223 * in switch-independent mode we'll write 0 here... */
34f80b04 4224 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4225 bp->mf_mode);
34f80b04 4226 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4227 bp->mf_mode);
34f80b04 4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4229 bp->mf_mode);
34f80b04 4230 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4231 bp->mf_mode);
34f80b04
EG
4232 }
4233
523224a3
DK
4234 /* Zero this manually as its initialization is
4235 currently missing in the initTool */
4236 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4237 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4238 USTORM_AGG_DATA_OFFSET + i * 4, 0);
f2e0899f
DK
4239 if (CHIP_IS_E2(bp)) {
4240 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4241 CHIP_INT_MODE_IS_BC(bp) ?
4242 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4243 }
523224a3 4244}
8a1c38d1 4245
523224a3
DK
4246static void bnx2x_init_internal_port(struct bnx2x *bp)
4247{
4248 /* port */
a2fbb9ea
ET
4249}
4250
471de716
EG
4251static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4252{
4253 switch (load_code) {
4254 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4255 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
4256 bnx2x_init_internal_common(bp);
4257 /* no break */
4258
4259 case FW_MSG_CODE_DRV_LOAD_PORT:
4260 bnx2x_init_internal_port(bp);
4261 /* no break */
4262
4263 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
4264 /* internal memory per function is
4265 initialized inside bnx2x_pf_init */
471de716
EG
4266 break;
4267
4268 default:
4269 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4270 break;
4271 }
4272}
4273
523224a3
DK
4274static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4275{
4276 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4277
4278 fp->state = BNX2X_FP_STATE_CLOSED;
4279
4280 fp->index = fp->cid = fp_idx;
4281 fp->cl_id = BP_L_ID(bp) + fp_idx;
4282 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4283 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4284 /* qZone id equals the FW (per path) client id */
4285 fp->cl_qzone_id = fp->cl_id +
f2e0899f
DK
4286 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4287 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4288 /* init shortcut */
f2e0899f
DK
4289 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4290 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
523224a3
DK
4291 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4292 /* Set up SB indices */
4293 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4294 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4295
4296 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4297 "cl_id %d fw_sb %d igu_sb %d\n",
4298 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4299 fp->igu_sb_id);
4300 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4301 fp->fw_sb_id, fp->igu_sb_id);
4302
4303 bnx2x_update_fpsb_idx(fp);
4304}
4305
9f6c9258 4306void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4307{
4308 int i;
4309
523224a3
DK
4310 for_each_queue(bp, i)
4311 bnx2x_init_fp_sb(bp, i);
37b091ba 4312#ifdef BCM_CNIC
523224a3
DK
4313
4314 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4315 BNX2X_VF_ID_INVALID, false,
4316 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4317
37b091ba 4318#endif
a2fbb9ea 4319
16119785
EG
4320 /* ensure status block indices were read */
4321 rmb();
4322
523224a3 4323 bnx2x_init_def_sb(bp);
5c862848 4324 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4325 bnx2x_init_rx_rings(bp);
523224a3 4326 bnx2x_init_tx_rings(bp);
a2fbb9ea 4327 bnx2x_init_sp_ring(bp);
523224a3 4328 bnx2x_init_eq_ring(bp);
471de716 4329 bnx2x_init_internal(bp, load_code);
523224a3 4330 bnx2x_pf_init(bp);
a2fbb9ea 4331 bnx2x_init_ind_table(bp);
0ef00459
EG
4332 bnx2x_stats_init(bp);
4333
4334 /* At this point, we are ready for interrupts */
4335 atomic_set(&bp->intr_sem, 0);
4336
4337 /* flush all before enabling interrupts */
4338 mb();
4339 mmiowb();
4340
615f8fd9 4341 bnx2x_int_enable(bp);
eb8da205
EG
4342
4343 /* Check for SPIO5 */
4344 bnx2x_attn_int_deasserted0(bp,
4345 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4346 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4347}
4348
4349/* end of nic init */
4350
4351/*
4352 * gzip service functions
4353 */
4354
4355static int bnx2x_gunzip_init(struct bnx2x *bp)
4356{
1a983142
FT
4357 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4358 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4359 if (bp->gunzip_buf == NULL)
4360 goto gunzip_nomem1;
4361
4362 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4363 if (bp->strm == NULL)
4364 goto gunzip_nomem2;
4365
4366 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4367 GFP_KERNEL);
4368 if (bp->strm->workspace == NULL)
4369 goto gunzip_nomem3;
4370
4371 return 0;
4372
4373gunzip_nomem3:
4374 kfree(bp->strm);
4375 bp->strm = NULL;
4376
4377gunzip_nomem2:
1a983142
FT
4378 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4379 bp->gunzip_mapping);
a2fbb9ea
ET
4380 bp->gunzip_buf = NULL;
4381
4382gunzip_nomem1:
cdaa7cb8
VZ
4383 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4384 " un-compression\n");
a2fbb9ea
ET
4385 return -ENOMEM;
4386}
4387
4388static void bnx2x_gunzip_end(struct bnx2x *bp)
4389{
4390 kfree(bp->strm->workspace);
a2fbb9ea
ET
4391 kfree(bp->strm);
4392 bp->strm = NULL;
4393
4394 if (bp->gunzip_buf) {
1a983142
FT
4395 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4396 bp->gunzip_mapping);
a2fbb9ea
ET
4397 bp->gunzip_buf = NULL;
4398 }
4399}
4400
94a78b79 4401static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4402{
4403 int n, rc;
4404
4405 /* check gzip header */
94a78b79
VZ
4406 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4407 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4408 return -EINVAL;
94a78b79 4409 }
a2fbb9ea
ET
4410
4411 n = 10;
4412
34f80b04 4413#define FNAME 0x8
a2fbb9ea
ET
4414
4415 if (zbuf[3] & FNAME)
4416 while ((zbuf[n++] != 0) && (n < len));
4417
94a78b79 4418 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4419 bp->strm->avail_in = len - n;
4420 bp->strm->next_out = bp->gunzip_buf;
4421 bp->strm->avail_out = FW_BUF_SIZE;
4422
4423 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4424 if (rc != Z_OK)
4425 return rc;
4426
4427 rc = zlib_inflate(bp->strm, Z_FINISH);
4428 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4429 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4430 bp->strm->msg);
a2fbb9ea
ET
4431
4432 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4433 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4434 netdev_err(bp->dev, "Firmware decompression error:"
4435 " gunzip_outlen (%d) not aligned\n",
4436 bp->gunzip_outlen);
a2fbb9ea
ET
4437 bp->gunzip_outlen >>= 2;
4438
4439 zlib_inflateEnd(bp->strm);
4440
4441 if (rc == Z_STREAM_END)
4442 return 0;
4443
4444 return rc;
4445}
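
bnx2x_gunzip() walks past the 10-byte gzip header by hand (plus the NUL-terminated original file name when the FNAME flag, bit 3 of the FLG byte, is set) and then hands zlib a raw deflate stream via the negative-window-bits trick (-MAX_WBITS). A standalone sketch of just the header walk, over a hypothetical buffer:

#include <stdio.h>

#define GZ_FNAME 0x8	/* FLG bit 3: original file name present */

int main(void)
{
	/* hypothetical gzip prefix: magic, deflate method, FNAME flag,
	 * mtime(4), XFL, OS, then "a.fw\0" as the stored name */
	unsigned char zbuf[] = { 0x1f, 0x8b, 0x08, GZ_FNAME,
				 0, 0, 0, 0, 0, 3,
				 'a', '.', 'f', 'w', '\0' /* deflate data... */ };
	int len = sizeof(zbuf);
	int n = 10;			/* fixed part of the header */

	if (zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08)
		return 1;		/* not a gzip/deflate stream */

	if (zbuf[3] & GZ_FNAME)
		while ((zbuf[n++] != 0) && (n < len))
			;		/* skip NUL-terminated name */

	printf("deflate payload starts at offset %d\n", n);	/* 15 */
	return 0;
}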
4446
4447/* nic load/unload */
4448
4449/*
34f80b04 4450 * General service functions
a2fbb9ea
ET
4451 */
4452
4453/* send a NIG loopback debug packet */
4454static void bnx2x_lb_pckt(struct bnx2x *bp)
4455{
a2fbb9ea 4456 u32 wb_write[3];
a2fbb9ea
ET
4457
4458 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4459 wb_write[0] = 0x55555555;
4460 wb_write[1] = 0x55555555;
34f80b04 4461 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4462 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4463
4464 /* NON-IP protocol */
a2fbb9ea
ET
4465 wb_write[0] = 0x09000000;
4466 wb_write[1] = 0x55555555;
34f80b04 4467 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4468 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4469}
4470
4471/* Some of the internal memories
4472 * are not directly readable from the driver;
4473 * to test them we send debug packets.
4474 */
4475static int bnx2x_int_mem_test(struct bnx2x *bp)
4476{
4477 int factor;
4478 int count, i;
4479 u32 val = 0;
4480
ad8d3948 4481 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4482 factor = 120;
ad8d3948
EG
4483 else if (CHIP_REV_IS_EMUL(bp))
4484 factor = 200;
4485 else
a2fbb9ea 4486 factor = 1;
a2fbb9ea 4487
a2fbb9ea
ET
4488 /* Disable inputs of parser neighbor blocks */
4489 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4490 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4491 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4492 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4493
4494 /* Write 0 to parser credits for CFC search request */
4495 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4496
4497 /* send Ethernet packet */
4498 bnx2x_lb_pckt(bp);
4499
4500 /* TODO: do I reset the NIG statistic? */
4501 /* Wait until NIG register shows 1 packet of size 0x10 */
4502 count = 1000 * factor;
4503 while (count) {
34f80b04 4504
a2fbb9ea
ET
4505 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4506 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4507 if (val == 0x10)
4508 break;
4509
4510 msleep(10);
4511 count--;
4512 }
4513 if (val != 0x10) {
4514 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4515 return -1;
4516 }
4517
4518 /* Wait until PRS register shows 1 packet */
4519 count = 1000 * factor;
4520 while (count) {
4521 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4522 if (val == 1)
4523 break;
4524
4525 msleep(10);
4526 count--;
4527 }
4528 if (val != 0x1) {
4529 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4530 return -2;
4531 }
4532
4533 /* Reset and init BRB, PRS */
34f80b04 4534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4535 msleep(50);
34f80b04 4536 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4537 msleep(50);
94a78b79
VZ
4538 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4539 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
4540
4541 DP(NETIF_MSG_HW, "part2\n");
4542
4543 /* Disable inputs of parser neighbor blocks */
4544 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4545 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4546 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4547 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4548
4549 /* Write 0 to parser credits for CFC search request */
4550 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4551
4552 /* send 10 Ethernet packets */
4553 for (i = 0; i < 10; i++)
4554 bnx2x_lb_pckt(bp);
4555
4556 /* Wait until NIG register shows 10 + 1
4557 packets of size 11*0x10 = 0xb0 */
4558 count = 1000 * factor;
4559 while (count) {
34f80b04 4560
a2fbb9ea
ET
4561 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4562 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4563 if (val == 0xb0)
4564 break;
4565
4566 msleep(10);
4567 count--;
4568 }
4569 if (val != 0xb0) {
4570 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4571 return -3;
4572 }
4573
4574 /* Wait until PRS register shows 2 packets */
4575 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4576 if (val != 2)
4577 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4578
4579 /* Write 1 to parser credits for CFC search request */
4580 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4581
4582 /* Wait until PRS register shows 3 packets */
4583 msleep(10 * factor);
4584 /* Wait until NIG register shows 1 packet of size 0x10 */
4585 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4586 if (val != 3)
4587 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4588
4589 /* clear NIG EOP FIFO */
4590 for (i = 0; i < 11; i++)
4591 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4592 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4593 if (val != 1) {
4594 BNX2X_ERR("clear of NIG failed\n");
4595 return -4;
4596 }
4597
4598 /* Reset and init BRB, PRS, NIG */
4599 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4600 msleep(50);
4601 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4602 msleep(50);
94a78b79
VZ
4603 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4604 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4605#ifndef BCM_CNIC
a2fbb9ea
ET
4606 /* set NIC mode */
4607 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4608#endif
4609
4610 /* Enable inputs of parser neighbor blocks */
4611 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4612 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4613 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4614 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
4615
4616 DP(NETIF_MSG_HW, "done\n");
4617
4618 return 0; /* OK */
4619}
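
Both waits in bnx2x_int_mem_test() use the same poll-until-value-or-timeout idiom, with the iteration budget scaled by the FPGA/emulation factor. A generic userspace sketch of the pattern (the fake register read below is a stand-in for REG_RD/bnx2x_read_dmae):

#include <stdio.h>
#include <unistd.h>

/* stand-in for a register read; returns 0x10 after a few polls */
static unsigned fake_reg_rd(void)
{
	static int calls;
	return (++calls >= 3) ? 0x10 : 0;
}

static int poll_reg(unsigned want, int count)
{
	while (count--) {
		unsigned val = fake_reg_rd();
		if (val == want)
			return 0;
		usleep(10000);		/* stands in for msleep(10) */
	}
	return -1;			/* timeout */
}

int main(void)
{
	printf("poll %s\n", poll_reg(0x10, 1000) ? "timed out" : "ok");
	return 0;
}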
4620
4621static void enable_blocks_attention(struct bnx2x *bp)
4622{
4623 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
f2e0899f
DK
4624 if (CHIP_IS_E2(bp))
4625 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4626 else
4627 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
4628 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4629 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
4630 /*
4631 * mask read length error interrupts in brb for parser
4632 * (parsing unit and 'checksum and crc' unit)
4633 * these errors are legal (PU reads fixed length and CAC can cause
4634 * read length error on truncated packets)
4635 */
4636 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
4637 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4638 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4639 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4640 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4641 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4642/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4643/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4644 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4645 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4646 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4647/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4648/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4649 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4650 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4651 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4652 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4653/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4654/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4655
34f80b04
EG
4656 if (CHIP_REV_IS_FPGA(bp))
4657 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
f2e0899f
DK
4658 else if (CHIP_IS_E2(bp))
4659 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4660 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4661 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4662 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4663 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4664 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
34f80b04
EG
4665 else
4666 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4667 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4668 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4669 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4670/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4671/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4672 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4673 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
4674/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4675 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
4676}
4677
72fd0718
VZ
4678static const struct {
4679 u32 addr;
4680 u32 mask;
4681} bnx2x_parity_mask[] = {
f2e0899f
DK
4682 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4683 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4684 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4685 {HC_REG_HC_PRTY_MASK, 0x7},
4686 {MISC_REG_MISC_PRTY_MASK, 0x1},
f85582f8
DK
4687 {QM_REG_QM_PRTY_MASK, 0x0},
4688 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
72fd0718
VZ
4689 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4690 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
f85582f8
DK
4691 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4692 {CDU_REG_CDU_PRTY_MASK, 0x0},
4693 {CFC_REG_CFC_PRTY_MASK, 0x0},
4694 {DBG_REG_DBG_PRTY_MASK, 0x0},
4695 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4696 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4697 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4698 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4699 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4700 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4701 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4702 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4703 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4704 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4705 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4706 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4707 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4708 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4709 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
72fd0718
VZ
4710};
4711
4712static void enable_blocks_parity(struct bnx2x *bp)
4713{
cbd9da7b 4714 int i;
72fd0718 4715
cbd9da7b 4716 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
72fd0718
VZ
4717 REG_WR(bp, bnx2x_parity_mask[i].addr,
4718 bnx2x_parity_mask[i].mask);
4719}
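
Driving the parity masks from a const table keeps policy (which bits stay masked per block) separate from mechanism (one write loop). A userspace sketch of the same table-driven pattern, with made-up addresses standing in for the GRC register offsets:

#include <stdio.h>

struct reg_mask {
	unsigned addr;
	unsigned mask;
};

static const struct reg_mask table[] = {
	{ 0x1000, 0x0 },	/* hypothetical block A: unmask everything */
	{ 0x2000, 0x18 },	/* hypothetical block B: keep bits 3,4 masked */
};

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))

static void reg_wr(unsigned addr, unsigned val)
{
	printf("WR %#x <- %#x\n", addr, val);	/* stands in for REG_WR */
}

int main(void)
{
	unsigned i;

	for (i = 0; i < ARRAY_LEN(table); i++)
		reg_wr(table[i].addr, table[i].mask);
	return 0;
}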
4720
34f80b04 4721
81f75bbf
EG
4722static void bnx2x_reset_common(struct bnx2x *bp)
4723{
4724 /* reset_common */
4725 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4726 0xd3ffff7f);
4727 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4728}
4729
573f2035
EG
4730static void bnx2x_init_pxp(struct bnx2x *bp)
4731{
4732 u16 devctl;
4733 int r_order, w_order;
4734
4735 pci_read_config_word(bp->pdev,
4736 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4737 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4738 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4739 if (bp->mrrs == -1)
4740 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4741 else {
4742 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4743 r_order = bp->mrrs;
4744 }
4745
4746 bnx2x_init_pxp_arb(bp, r_order, w_order);
4747}
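
bnx2x_init_pxp() derives its read/write orders straight from the PCIe Device Control register: max payload size lives in bits 7:5 and max read request size in bits 14:12, each encoding 128 << n bytes. A decoding sketch with a hypothetical devctl value:

#include <stdio.h>

#define DEVCTL_PAYLOAD_MASK 0x00e0	/* bits 7:5 */
#define DEVCTL_READRQ_MASK  0x7000	/* bits 14:12 */

int main(void)
{
	unsigned short devctl = 0x2810;	/* hypothetical config-space value */
	int w_order = (devctl & DEVCTL_PAYLOAD_MASK) >> 5;
	int r_order = (devctl & DEVCTL_READRQ_MASK) >> 12;

	printf("payload %d bytes, read request %d bytes\n",
	       128 << w_order, 128 << r_order);
	return 0;
}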
fd4ef40d
EG
4748
4749static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4750{
2145a920 4751 int is_required;
fd4ef40d 4752 u32 val;
2145a920 4753 int port;
fd4ef40d 4754
2145a920
VZ
4755 if (BP_NOMCP(bp))
4756 return;
4757
4758 is_required = 0;
fd4ef40d
EG
4759 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4760 SHARED_HW_CFG_FAN_FAILURE_MASK;
4761
4762 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4763 is_required = 1;
4764
4765 /*
4766 * The fan failure mechanism is usually related to the PHY type since
4767 * the power consumption of the board is affected by the PHY. Currently,
4768 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4769 */
4770 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4771 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4772 is_required |=
d90d96ba
YR
4773 bnx2x_fan_failure_det_req(
4774 bp,
4775 bp->common.shmem_base,
a22f0788 4776 bp->common.shmem2_base,
d90d96ba 4777 port);
fd4ef40d
EG
4778 }
4779
4780 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4781
4782 if (is_required == 0)
4783 return;
4784
4785 /* Fan failure is indicated by SPIO 5 */
4786 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4787 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4788
4789 /* set to active low mode */
4790 val = REG_RD(bp, MISC_REG_SPIO_INT);
4791 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4792 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
4793 REG_WR(bp, MISC_REG_SPIO_INT, val);
4794
4795 /* enable interrupt to signal the IGU */
4796 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4797 val |= (1 << MISC_REGISTERS_SPIO_5);
4798 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4799}
4800
f2e0899f
DK
4801static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4802{
4803 u32 offset = 0;
4804
4805 if (CHIP_IS_E1(bp))
4806 return;
4807 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4808 return;
4809
4810 switch (BP_ABS_FUNC(bp)) {
4811 case 0:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4813 break;
4814 case 1:
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4816 break;
4817 case 2:
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4819 break;
4820 case 3:
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4822 break;
4823 case 4:
4824 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4825 break;
4826 case 5:
4827 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4828 break;
4829 case 6:
4830 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4831 break;
4832 case 7:
4833 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4834 break;
4835 default:
4836 return;
4837 }
4838
4839 REG_WR(bp, offset, pretend_func_num);
4840 REG_RD(bp, offset);
4841 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4842}
4843
4844static void bnx2x_pf_disable(struct bnx2x *bp)
4845{
4846 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4847 val &= ~IGU_PF_CONF_FUNC_EN;
4848
4849 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4850 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4851 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4852}
4853
523224a3 4854static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4855{
a2fbb9ea 4856 u32 val, i;
a2fbb9ea 4857
f2e0899f 4858 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4859
81f75bbf 4860 bnx2x_reset_common(bp);
34f80b04
EG
4861 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4862 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4863
94a78b79 4864 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4865 if (!CHIP_IS_E1(bp))
fb3bff17 4866 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4867
f2e0899f
DK
4868 if (CHIP_IS_E2(bp)) {
4869 u8 fid;
4870
4871 /**
4872 * In 4-port or 2-port mode we need to turn off master-enable
4873 * for everyone; after that, turn it back on for self.
4874 * So, multi-function or not, we always disable it
4875 * for all functions on the given path; this means 0,2,4,6 for
4876 * path 0 and 1,3,5,7 for path 1.
4877 */
4878 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4879 if (fid == BP_ABS_FUNC(bp)) {
4880 REG_WR(bp,
4881 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4882 1);
4883 continue;
4884 }
4885
4886 bnx2x_pretend_func(bp, fid);
4887 /* clear pf enable */
4888 bnx2x_pf_disable(bp);
4889 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4890 }
4891 }
a2fbb9ea 4892
94a78b79 4893 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
4894 if (CHIP_IS_E1(bp)) {
4895 /* enable HW interrupt from PXP on USDM overflow
4896 bit 16 on INT_MASK_0 */
4897 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4898 }
a2fbb9ea 4899
94a78b79 4900 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 4901 bnx2x_init_pxp(bp);
a2fbb9ea
ET
4902
4903#ifdef __BIG_ENDIAN
34f80b04
EG
4904 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4905 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4906 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4907 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4908 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
4909 /* make sure this value is 0 */
4910 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
4911
4912/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4913 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4914 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4915 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4916 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
4917#endif
4918
523224a3
DK
4919 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4920
34f80b04
EG
4921 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4922 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 4923
34f80b04
EG
4924 /* let the HW do its magic ... */
4925 msleep(100);
4926 /* finish PXP init */
4927 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4928 if (val != 1) {
4929 BNX2X_ERR("PXP2 CFG failed\n");
4930 return -EBUSY;
4931 }
4932 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4933 if (val != 1) {
4934 BNX2X_ERR("PXP2 RD_INIT failed\n");
4935 return -EBUSY;
4936 }
a2fbb9ea 4937
f2e0899f
DK
4938 /* Timers bug workaround, E2 only. We need to set the entire ILT to
4939 * have entries with value "0" and valid bit on.
4940 * This needs to be done by the first PF that is loaded in a path
4941 * (i.e. common phase)
4942 */
4943 if (CHIP_IS_E2(bp)) {
4944 struct ilt_client_info ilt_cli;
4945 struct bnx2x_ilt ilt;
4946 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4947 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4948
4949 /* initialize dummy TM client */
4950 ilt_cli.start = 0;
4951 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4952 ilt_cli.client_num = ILT_CLIENT_TM;
4953
4954 /* Step 1: set zeroes to all ilt page entries with valid bit on
4955 * Step 2: set the timers first/last ilt entry to point
4956 * to the entire range to prevent ILT range error for 3rd/4th
4957 * vnic (this code assumes existence of the vnic)
4958 *
4959 * Both steps are performed by the call to bnx2x_ilt_client_init_op()
4960 * with the dummy TM client.
4961 *
4962 * We must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4963 * and its counterpart are split registers.
4964 */
4965 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4966 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4967 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4968
4969 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4970 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4971 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4972 }
4973
4974
34f80b04
EG
4975 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4976 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 4977
f2e0899f
DK
4978 if (CHIP_IS_E2(bp)) {
4979 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4980 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4981 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4982
4983 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4984
4985 /* let the HW do its magic ... */
4986 do {
4987 msleep(200);
4988 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4989 } while (factor-- && (val != 1));
4990
4991 if (val != 1) {
4992 BNX2X_ERR("ATC_INIT failed\n");
4993 return -EBUSY;
4994 }
4995 }
4996
94a78b79 4997 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 4998
34f80b04
EG
4999 /* clean the DMAE memory */
5000 bp->dmae_ready = 1;
5001 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5002
94a78b79
VZ
5003 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5004 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5005 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5006 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 5007
34f80b04
EG
5008 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5009 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5010 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5011 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5012
94a78b79 5013 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba 5014
f2e0899f
DK
5015 if (CHIP_MODE_IS_4_PORT(bp))
5016 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
f85582f8 5017
523224a3
DK
5018 /* QM queue pointer table */
5019 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5020
34f80b04
EG
5021 /* soft reset pulse */
5022 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5023 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5024
37b091ba 5025#ifdef BCM_CNIC
94a78b79 5026 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 5027#endif
a2fbb9ea 5028
94a78b79 5029 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
523224a3
DK
5030 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5031
34f80b04
EG
5032 if (!CHIP_REV_IS_SLOW(bp)) {
5033 /* enable hw interrupt from doorbell Q */
5034 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5035 }
a2fbb9ea 5036
94a78b79 5037 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
f2e0899f
DK
5038 if (CHIP_MODE_IS_4_PORT(bp)) {
5039 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5040 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5041 }
5042
94a78b79 5043 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 5044 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 5045#ifndef BCM_CNIC
3196a88a
EG
5046 /* set NIC mode */
5047 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 5048#endif
f2e0899f 5049 if (!CHIP_IS_E1(bp))
fb3bff17 5050 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
f85582f8 5051
f2e0899f
DK
5052 if (CHIP_IS_E2(bp)) {
5053 /* Bit-map indicating which L2 hdrs may appear after the
5054 basic Ethernet header */
5055 int has_ovlan = IS_MF(bp);
5056 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5057 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5058 }
a2fbb9ea 5059
94a78b79
VZ
5060 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5061 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5062 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5063 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 5064
ca00392c
EG
5065 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5066 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5067 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5068 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 5069
94a78b79
VZ
5070 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5071 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5072 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5073 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 5074
f2e0899f
DK
5075 if (CHIP_MODE_IS_4_PORT(bp))
5076 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5077
34f80b04
EG
5078 /* sync semi rtc */
5079 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5080 0x80000000);
5081 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5082 0x80000000);
a2fbb9ea 5083
94a78b79
VZ
5084 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5085 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5086 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 5087
f2e0899f
DK
5088 if (CHIP_IS_E2(bp)) {
5089 int has_ovlan = IS_MF(bp);
5090 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5091 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5092 }
5093
34f80b04 5094 REG_WR(bp, SRC_REG_SOFT_RST, 1);
c68ed255
TH
5095 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5096 REG_WR(bp, i, random32());
f85582f8 5097
94a78b79 5098 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
5099#ifdef BCM_CNIC
5100 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5101 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5102 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5103 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5104 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5105 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5106 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5107 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5108 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5109 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5110#endif
34f80b04 5111 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5112
34f80b04
EG
5113 if (sizeof(union cdu_context) != 1024)
5114 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5115 dev_alert(&bp->pdev->dev, "please adjust the size "
5116 "of cdu_context(%ld)\n",
7995c64e 5117 (long)sizeof(union cdu_context));
a2fbb9ea 5118
94a78b79 5119 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5120 val = (4 << 24) + (0 << 12) + 1024;
5121 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 5122
94a78b79 5123 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5124 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5125 /* enable context validation interrupt from CFC */
5126 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5127
5128 /* set the thresholds to prevent CFC/CDU race */
5129 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5130
94a78b79 5131 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
5132
5133 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5134 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5135
5136 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 5137 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5138
94a78b79 5139 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5140 /* Reset PCIE errors for debug */
5141 REG_WR(bp, 0x2814, 0xffffffff);
5142 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5143
f2e0899f
DK
5144 if (CHIP_IS_E2(bp)) {
5145 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5146 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5147 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5148 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5149 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5150 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5151 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5152 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5153 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5154 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5155 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5156 }
5157
94a78b79 5158 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5159 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5160 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5161 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5162
94a78b79 5163 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 5164 if (!CHIP_IS_E1(bp)) {
fb3bff17
DK
5165 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5166 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
34f80b04 5167 }
f2e0899f
DK
5168 if (CHIP_IS_E2(bp)) {
5169 /* Bit-map indicating which L2 hdrs may appear after the
5170 basic Ethernet header */
5171 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5172 }
34f80b04
EG
5173
5174 if (CHIP_REV_IS_SLOW(bp))
5175 msleep(200);
5176
5177 /* finish CFC init */
5178 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5179 if (val != 1) {
5180 BNX2X_ERR("CFC LL_INIT failed\n");
5181 return -EBUSY;
5182 }
5183 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5184 if (val != 1) {
5185 BNX2X_ERR("CFC AC_INIT failed\n");
5186 return -EBUSY;
5187 }
5188 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5189 if (val != 1) {
5190 BNX2X_ERR("CFC CAM_INIT failed\n");
5191 return -EBUSY;
5192 }
5193 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5194
f2e0899f
DK
5195 if (CHIP_IS_E1(bp)) {
5196 /* read NIG statistic
5197 to see if this is our first bring-up since power-up */
5198 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5199 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 5200
f2e0899f
DK
5201 /* do internal memory self test */
5202 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5203 BNX2X_ERR("internal mem self test failed\n");
5204 return -EBUSY;
5205 }
34f80b04
EG
5206 }
5207
d90d96ba 5208 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5209 bp->common.shmem_base,
5210 bp->common.shmem2_base);
f1410647 5211
fd4ef40d
EG
5212 bnx2x_setup_fan_failure_detection(bp);
5213
34f80b04
EG
5214 /* clear PXP2 attentions */
5215 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5216
34f80b04 5217 enable_blocks_attention(bp);
72fd0718
VZ
5218 if (CHIP_PARITY_SUPPORTED(bp))
5219 enable_blocks_parity(bp);
a2fbb9ea 5220
6bbca910 5221 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5222 /* In E2 2-PORT mode, the same ext phy is used for both paths */
5223 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5224 CHIP_IS_E1x(bp)) {
5225 u32 shmem_base[2], shmem2_base[2];
5226 shmem_base[0] = bp->common.shmem_base;
5227 shmem2_base[0] = bp->common.shmem2_base;
5228 if (CHIP_IS_E2(bp)) {
5229 shmem_base[1] =
5230 SHMEM2_RD(bp, other_shmem_base_addr);
5231 shmem2_base[1] =
5232 SHMEM2_RD(bp, other_shmem2_base_addr);
5233 }
5234 bnx2x_acquire_phy_lock(bp);
5235 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5236 bp->common.chip_id);
5237 bnx2x_release_phy_lock(bp);
5238 }
6bbca910
YR
5239 } else
5240 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5241
34f80b04
EG
5242 return 0;
5243}
a2fbb9ea 5244
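
The three CFC *_INIT_DONE checks above share one idiom: poll a status register until it reads the expected value, sleeping between reads and giving up after a bounded budget. A minimal sketch of that idiom, assuming only the driver's REG_RD() accessor; the real reg_poll() lives in the init headers and its exact signature may differ:

#include <linux/delay.h>	/* msleep */

/* Poll @reg until it reads @expected; sleep @wait ms between reads and
 * give up after @timeout ms. Returns the last value read, so the caller
 * compares it against @expected, exactly as the CFC checks above do.
 */
static u32 example_reg_poll(struct bnx2x *bp, u32 reg, u32 expected,
			    int timeout, int wait)
{
	u32 val;

	do {
		val = REG_RD(bp, reg);
		if (val == expected)
			break;
		msleep(wait);
		timeout -= wait;
	} while (timeout > 0);

	return val;
}
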
523224a3 5245static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
5246{
5247 int port = BP_PORT(bp);
94a78b79 5248 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5249 u32 low, high;
34f80b04 5250 u32 val;
a2fbb9ea 5251
cdaa7cb8 5252 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
5253
5254 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5255
94a78b79 5256 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 5257 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c 5258
f2e0899f
DK
5259 /* Timers bug workaround: the pf_master bit in PGLUE is disabled at the
5260 * common phase, so we must re-enable it here before any DMAE access is
5261 * attempted. Hence the enable-master is added manually to the port
5262 * phase (it also happens in the function phase)
5263 */
5264 if (CHIP_IS_E2(bp))
5265 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5266
ca00392c
EG
5267 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5268 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5269 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 5270 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 5271
523224a3
DK
5272 /* QM cid (connection) count */
5273 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 5274
523224a3 5275#ifdef BCM_CNIC
94a78b79 5276 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
5277 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5278 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 5279#endif
cdaa7cb8 5280
94a78b79 5281 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5282
f2e0899f
DK
5283 if (CHIP_MODE_IS_4_PORT(bp))
5284 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5285
5286 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5287 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5288 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5289 /* no pause for emulation and FPGA */
5290 low = 0;
5291 high = 513;
5292 } else {
5293 if (IS_MF(bp))
5294 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5295 else if (bp->dev->mtu > 4096) {
5296 if (bp->flags & ONE_PORT_FLAG)
5297 low = 160;
5298 else {
5299 val = bp->dev->mtu;
5300 /* (24*1024 + val*4)/256 */
5301 low = 96 + (val/64) +
5302 ((val % 64) ? 1 : 0);
5303 }
5304 } else
5305 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5306 high = low + 56; /* 14*1024/256 */
5307 }
5308 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5309 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 5310 }
1c06328c 5311
f2e0899f
DK
5312 if (CHIP_MODE_IS_4_PORT(bp)) {
5313 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5314 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5315 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5316 BRB1_REG_MAC_GUARANTIED_0), 40);
5317 }
1c06328c 5318
94a78b79 5319 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 5320
94a78b79 5321 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 5322 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 5323 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 5324 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5325
94a78b79
VZ
5326 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5327 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5328 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5329 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
f2e0899f
DK
5330 if (CHIP_MODE_IS_4_PORT(bp))
5331 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
356e2385 5332
94a78b79 5333 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 5334 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5335
94a78b79 5336 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea 5337
f2e0899f
DK
5338 if (!CHIP_IS_E2(bp)) {
5339 /* configure PBF to work without PAUSE mtu 9000 */
5340 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5341
f2e0899f
DK
5342 /* update threshold */
5343 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5344 /* update init credit */
5345 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 5346
f2e0899f
DK
5347 /* probe changes */
5348 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5349 udelay(50);
5350 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5351 }
a2fbb9ea 5352
37b091ba
MC
5353#ifdef BCM_CNIC
5354 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 5355#endif
94a78b79 5356 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 5357 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5358
5359 if (CHIP_IS_E1(bp)) {
5360 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5361 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5362 }
94a78b79 5363 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5364
f2e0899f
DK
5365 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5366
94a78b79 5367 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5368 /* init aeu_mask_attn_func_0/1:
5369 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5370 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5371 * bits 4-7 are used for "per vn group attention" */
5372 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
fb3bff17 5373 (IS_MF(bp) ? 0xF7 : 0x7));
34f80b04 5374
94a78b79 5375 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 5376 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 5377 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 5378 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 5379 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5380
94a78b79 5381 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5382
5383 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5384
f2e0899f 5385 if (!CHIP_IS_E1(bp)) {
fb3bff17 5386 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 5387 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
fb3bff17 5388 (IS_MF(bp) ? 0x1 : 0x2));
34f80b04 5389
f2e0899f
DK
5390 if (CHIP_IS_E2(bp)) {
5391 val = 0;
5392 switch (bp->mf_mode) {
5393 case MULTI_FUNCTION_SD:
5394 val = 1;
5395 break;
5396 case MULTI_FUNCTION_SI:
5397 val = 2;
5398 break;
5399 }
5400
5401 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5402 NIG_REG_LLH0_CLS_TYPE), val);
5403 }
1c06328c
EG
5404 {
5405 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5406 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5407 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5408 }
34f80b04
EG
5409 }
5410
94a78b79 5411 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 5412 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 5413 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5414 bp->common.shmem_base,
5415 bp->common.shmem2_base);
d90d96ba 5416 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 5417 bp->common.shmem2_base, port)) {
4d295db0
EG
5418 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5419 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5420 val = REG_RD(bp, reg_addr);
f1410647 5421 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 5422 REG_WR(bp, reg_addr, val);
f1410647 5423 }
c18487ee 5424 bnx2x__link_reset(bp);
a2fbb9ea 5425
34f80b04
EG
5426 return 0;
5427}
5428
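
In the BRB pause branch above, the "(24*1024 + val*4)/256" comment is the whole story: the low threshold is expressed in 256-byte blocks, and 24 KB of headroom plus 4 bytes per MTU byte works out to 96 + mtu/64, rounded up. A hedged sketch of the same arithmetic (the helper name is illustrative, not driver API):

#include <linux/types.h>
#include <linux/kernel.h>	/* DIV_ROUND_UP */

/* (24*1024 + mtu*4) / 256 rounded up == 96 + ceil(mtu/64), the same value
 * the driver builds as "96 + (val/64) + ((val % 64) ? 1 : 0)".
 * Example: mtu = 9000 -> 96 + DIV_ROUND_UP(9000, 64) = 96 + 141 = 237
 * blocks of 256 bytes.
 */
static u32 example_brb_low_threshold(u32 mtu)
{
	return 96 + DIV_ROUND_UP(mtu, 64);
}
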
34f80b04
EG
5429static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5430{
5431 int reg;
5432
f2e0899f 5433 if (CHIP_IS_E1(bp))
34f80b04 5434 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
5435 else
5436 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04
EG
5437
5438 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5439}
5440
f2e0899f
DK
5441static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5442{
5443 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5444}
5445
5446static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5447{
5448 u32 i, base = FUNC_ILT_BASE(func);
5449 for (i = base; i < base + ILT_PER_FUNC; i++)
5450 bnx2x_ilt_wr(bp, i, 0);
5451}
5452
523224a3 5453static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
5454{
5455 int port = BP_PORT(bp);
5456 int func = BP_FUNC(bp);
523224a3
DK
5457 struct bnx2x_ilt *ilt = BP_ILT(bp);
5458 u16 cdu_ilt_start;
8badd27a 5459 u32 addr, val;
f4a66897
VZ
5460 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5461 int i, main_mem_width;
34f80b04 5462
cdaa7cb8 5463 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 5464
8badd27a 5465 /* set MSI reconfigure capability */
f2e0899f
DK
5466 if (bp->common.int_block == INT_BLOCK_HC) {
5467 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5468 val = REG_RD(bp, addr);
5469 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5470 REG_WR(bp, addr, val);
5471 }
8badd27a 5472
523224a3
DK
5473 ilt = BP_ILT(bp);
5474 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 5475
523224a3
DK
5476 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5477 ilt->lines[cdu_ilt_start + i].page =
5478 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5479 ilt->lines[cdu_ilt_start + i].page_mapping =
5480 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5481 /* cdu ilt pages are allocated manually so there's no need to
5482 set the size */
37b091ba 5483 }
523224a3 5484 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 5485
523224a3
DK
5486#ifdef BCM_CNIC
5487 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
37b091ba 5488
523224a3
DK
5489 /* T1 hash bits value determines the T1 number of entries */
5490 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5491#endif
37b091ba 5492
523224a3
DK
5493#ifndef BCM_CNIC
5494 /* set NIC mode */
5495 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5496#endif /* BCM_CNIC */
37b091ba 5497
f2e0899f
DK
5498 if (CHIP_IS_E2(bp)) {
5499 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5500
5501 /* Turn on a single ISR mode in IGU if driver is going to use
5502 * INT#x or MSI
5503 */
5504 if (!(bp->flags & USING_MSIX_FLAG))
5505 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5506 /*
5507 * Timers bug workaround: function init part.
5508 * Need to wait 20 msec after initializing the ILT to
5509 * make sure there are no requests left in any of the
5510 * PXP internal queues with "old" ILT addresses
5511 */
5512 msleep(20);
5513 /*
5514 * Master enable - Due to WB DMAE writes performed before this
5515 * register is re-initialized as part of the regular function
5516 * init
5517 */
5518 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5519 /* Enable the function in IGU */
5520 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5521 }
5522
523224a3 5523 bp->dmae_ready = 1;
34f80b04 5524
523224a3
DK
5525 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5526
f2e0899f
DK
5527 if (CHIP_IS_E2(bp))
5528 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5529
523224a3
DK
5530 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5531 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5532 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5533 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5534 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5535 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5536 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5537 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5538 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5539
f2e0899f
DK
5540 if (CHIP_IS_E2(bp)) {
5541 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5542 BP_PATH(bp));
5543 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5544 BP_PATH(bp));
5545 }
5546
5547 if (CHIP_MODE_IS_4_PORT(bp))
5548 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5549
5550 if (CHIP_IS_E2(bp))
5551 REG_WR(bp, QM_REG_PF_EN, 1);
5552
523224a3 5553 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5554
5555 if (CHIP_MODE_IS_4_PORT(bp))
5556 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5557
523224a3
DK
5558 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5559 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5560 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5561 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5562 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5563 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5564 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5565 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5566 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5567 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5568 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5569 if (CHIP_IS_E2(bp))
5570 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5571
523224a3
DK
5572 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5573
5574 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
34f80b04 5575
f2e0899f
DK
5576 if (CHIP_IS_E2(bp))
5577 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5578
fb3bff17 5579 if (IS_MF(bp)) {
34f80b04 5580 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 5581 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
5582 }
5583
523224a3
DK
5584 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5585
34f80b04 5586 /* HC init per function */
f2e0899f
DK
5587 if (bp->common.int_block == INT_BLOCK_HC) {
5588 if (CHIP_IS_E1H(bp)) {
5589 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5590
5591 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5592 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5593 }
5594 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5595
5596 } else {
5597 int num_segs, sb_idx, prod_offset;
5598
34f80b04
EG
5599 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5600
f2e0899f
DK
5601 if (CHIP_IS_E2(bp)) {
5602 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5603 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5604 }
5605
5606 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5607
5608 if (CHIP_IS_E2(bp)) {
5609 int dsb_idx = 0;
5610 /**
5611 * Producer memory:
5612 * E2 mode: addresses 0-135 map to the mapping memory;
5613 * 136 - PF0 default prod; 137 - PF1 default prod;
5614 * 138 - PF2 default prod; 139 - PF3 default prod;
5615 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5616 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5617 * 144-147 reserved.
5618 *
5619 * E1.5 mode - in backward-compatible mode:
5620 * for non-default SBs, each even line in the memory
5621 * holds the U producer and each odd line holds
5622 * the C producer. The first 128 producers are for
5623 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5624 * producers are for the DSB for each PF.
5625 * Each PF has five segments: (the order inside each
5626 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5627 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5628 * 144-147 attn prods;
5629 */
5630 /* non-default-status-blocks */
5631 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5632 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5633 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5634 prod_offset = (bp->igu_base_sb + sb_idx) *
5635 num_segs;
5636
5637 for (i = 0; i < num_segs; i++) {
5638 addr = IGU_REG_PROD_CONS_MEMORY +
5639 (prod_offset + i) * 4;
5640 REG_WR(bp, addr, 0);
5641 }
5642 /* send consumer update with value 0 */
5643 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5644 USTORM_ID, 0, IGU_INT_NOP, 1);
5645 bnx2x_igu_clear_sb(bp,
5646 bp->igu_base_sb + sb_idx);
5647 }
5648
5649 /* default-status-blocks */
5650 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5651 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5652
5653 if (CHIP_MODE_IS_4_PORT(bp))
5654 dsb_idx = BP_FUNC(bp);
5655 else
5656 dsb_idx = BP_E1HVN(bp);
5657
5658 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5659 IGU_BC_BASE_DSB_PROD + dsb_idx :
5660 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5661
5662 for (i = 0; i < (num_segs * E1HVN_MAX);
5663 i += E1HVN_MAX) {
5664 addr = IGU_REG_PROD_CONS_MEMORY +
5665 (prod_offset + i)*4;
5666 REG_WR(bp, addr, 0);
5667 }
5668 /* send consumer update with 0 */
5669 if (CHIP_INT_MODE_IS_BC(bp)) {
5670 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5671 USTORM_ID, 0, IGU_INT_NOP, 1);
5672 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5673 CSTORM_ID, 0, IGU_INT_NOP, 1);
5674 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5675 XSTORM_ID, 0, IGU_INT_NOP, 1);
5676 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5677 TSTORM_ID, 0, IGU_INT_NOP, 1);
5678 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5679 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5680 } else {
5681 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5682 USTORM_ID, 0, IGU_INT_NOP, 1);
5683 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5684 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5685 }
5686 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5687
5688 /* !!! these should become driver const once
5689 rf-tool supports split-68 const */
5690 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5691 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5692 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5693 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5694 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5695 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5696 }
34f80b04 5697 }
34f80b04 5698
c14423fe 5699 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5700 REG_WR(bp, 0x2114, 0xffffffff);
5701 REG_WR(bp, 0x2120, 0xffffffff);
523224a3
DK
5702
5703 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5704 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5705 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5706 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5707 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5708 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5709
f4a66897
VZ
5710 if (CHIP_IS_E1x(bp)) {
5711 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5712 main_mem_base = HC_REG_MAIN_MEMORY +
5713 BP_PORT(bp) * (main_mem_size * 4);
5714 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5715 main_mem_width = 8;
5716
5717 val = REG_RD(bp, main_mem_prty_clr);
5718 if (val)
5719 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5720 "block during "
5721 "function init (0x%x)!\n", val);
5722
5723 /* Clear "false" parity errors in MSI-X table */
5724 for (i = main_mem_base;
5725 i < main_mem_base + main_mem_size * 4;
5726 i += main_mem_width) {
5727 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5728 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5729 i, main_mem_width / 4);
5730 }
5731 /* Clear HC parity attention */
5732 REG_RD(bp, main_mem_prty_clr);
5733 }
5734
b7737c9b 5735 bnx2x_phy_probe(&bp->link_params);
f85582f8 5736
34f80b04
EG
5737 return 0;
5738}
5739
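
The scrub loop at the end of bnx2x_init_hw_func() is what prevents the false MSI-X parity errors: each 8-byte row of the HC main memory (which backs the MSI-X table) is read via DMAE into the wb_data scratch buffer and written straight back, so the RAM regenerates the row's parity bits, and the final read of HC_REG_HC_PRTY_STS_CLR drops any latched attention. A condensed sketch of the read-back/write-back idiom, using the same two DMAE helpers the loop calls:

/* Scrub one row of a parity-protected RAM: DMAE the row into the
 * slow-path wb_data scratch buffer, then DMAE the identical bytes
 * back, forcing the write port to recompute the row's parity.
 */
static void example_scrub_row(struct bnx2x *bp, u32 row_addr, int dwords)
{
	bnx2x_read_dmae(bp, row_addr, dwords);		/* GRC -> wb_data */
	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
			 row_addr, dwords);		/* wb_data -> GRC */
}
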
9f6c9258 5740int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04 5741{
523224a3 5742 int rc = 0;
a2fbb9ea 5743
34f80b04 5744 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
f2e0899f 5745 BP_ABS_FUNC(bp), load_code);
a2fbb9ea 5746
34f80b04
EG
5747 bp->dmae_ready = 0;
5748 mutex_init(&bp->dmae_mutex);
54016b26
EG
5749 rc = bnx2x_gunzip_init(bp);
5750 if (rc)
5751 return rc;
a2fbb9ea 5752
34f80b04
EG
5753 switch (load_code) {
5754 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5755 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
523224a3 5756 rc = bnx2x_init_hw_common(bp, load_code);
34f80b04
EG
5757 if (rc)
5758 goto init_hw_err;
5759 /* no break */
5760
5761 case FW_MSG_CODE_DRV_LOAD_PORT:
523224a3 5762 rc = bnx2x_init_hw_port(bp);
34f80b04
EG
5763 if (rc)
5764 goto init_hw_err;
5765 /* no break */
5766
5767 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3 5768 rc = bnx2x_init_hw_func(bp);
34f80b04
EG
5769 if (rc)
5770 goto init_hw_err;
5771 break;
5772
5773 default:
5774 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5775 break;
5776 }
5777
5778 if (!BP_NOMCP(bp)) {
f2e0899f 5779 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5780
5781 bp->fw_drv_pulse_wr_seq =
f2e0899f 5782 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
a2fbb9ea 5783 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
5784 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5785 }
a2fbb9ea 5786
34f80b04
EG
5787init_hw_err:
5788 bnx2x_gunzip_end(bp);
5789
5790 return rc;
a2fbb9ea
ET
5791}
5792
9f6c9258 5793void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
5794{
5795
5796#define BNX2X_PCI_FREE(x, y, size) \
5797 do { \
5798 if (x) { \
523224a3 5799 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
a2fbb9ea
ET
5800 x = NULL; \
5801 y = 0; \
5802 } \
5803 } while (0)
5804
5805#define BNX2X_FREE(x) \
5806 do { \
5807 if (x) { \
523224a3 5808 kfree((void *)x); \
a2fbb9ea
ET
5809 x = NULL; \
5810 } \
5811 } while (0)
5812
5813 int i;
5814
5815 /* fastpath */
555f6c78 5816 /* Common */
a2fbb9ea 5817 for_each_queue(bp, i) {
555f6c78 5818 /* status blocks */
f2e0899f
DK
5819 if (CHIP_IS_E2(bp))
5820 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5821 bnx2x_fp(bp, i, status_blk_mapping),
5822 sizeof(struct host_hc_status_block_e2));
5823 else
5824 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5825 bnx2x_fp(bp, i, status_blk_mapping),
5826 sizeof(struct host_hc_status_block_e1x));
555f6c78
EG
5827 }
5828 /* Rx */
54b9ddaa 5829 for_each_queue(bp, i) {
a2fbb9ea 5830
555f6c78 5831 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
5832 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5833 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5834 bnx2x_fp(bp, i, rx_desc_mapping),
5835 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5836
5837 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5838 bnx2x_fp(bp, i, rx_comp_mapping),
5839 sizeof(struct eth_fast_path_rx_cqe) *
5840 NUM_RCQ_BD);
a2fbb9ea 5841
7a9b2557 5842 /* SGE ring */
32626230 5843 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5844 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5845 bnx2x_fp(bp, i, rx_sge_mapping),
5846 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5847 }
555f6c78 5848 /* Tx */
54b9ddaa 5849 for_each_queue(bp, i) {
555f6c78
EG
5850
5851 /* fastpath tx rings: tx_buf tx_desc */
5852 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5853 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5854 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 5855 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 5856 }
a2fbb9ea
ET
5857 /* end of fastpath */
5858
5859 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 5860 sizeof(struct host_sp_status_block));
a2fbb9ea
ET
5861
5862 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5863 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5864
523224a3
DK
5865 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5866 bp->context.size);
5867
5868 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5869
5870 BNX2X_FREE(bp->ilt->lines);
f85582f8 5871
37b091ba 5872#ifdef BCM_CNIC
f2e0899f
DK
5873 if (CHIP_IS_E2(bp))
5874 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5875 sizeof(struct host_hc_status_block_e2));
5876 else
5877 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5878 sizeof(struct host_hc_status_block_e1x));
f85582f8 5879
523224a3 5880 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
a2fbb9ea 5881#endif
f85582f8 5882
7a9b2557 5883 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 5884
523224a3
DK
5885 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5886 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5887
a2fbb9ea
ET
5888#undef BNX2X_PCI_FREE
5889#undef BNX2X_FREE
5890}
5891
f2e0899f
DK
5892static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5893{
5894 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5895 if (CHIP_IS_E2(bp)) {
5896 bnx2x_fp(bp, index, sb_index_values) =
5897 (__le16 *)status_blk.e2_sb->sb.index_values;
5898 bnx2x_fp(bp, index, sb_running_index) =
5899 (__le16 *)status_blk.e2_sb->sb.running_index;
5900 } else {
5901 bnx2x_fp(bp, index, sb_index_values) =
5902 (__le16 *)status_blk.e1x_sb->sb.index_values;
5903 bnx2x_fp(bp, index, sb_running_index) =
5904 (__le16 *)status_blk.e1x_sb->sb.running_index;
5905 }
5906}
5907
9f6c9258 5908int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea 5909{
a2fbb9ea
ET
5910#define BNX2X_PCI_ALLOC(x, y, size) \
5911 do { \
1a983142 5912 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
5913 if (x == NULL) \
5914 goto alloc_mem_err; \
5915 memset(x, 0, size); \
5916 } while (0)
a2fbb9ea 5917
9f6c9258
DK
5918#define BNX2X_ALLOC(x, size) \
5919 do { \
523224a3 5920 x = kzalloc(size, GFP_KERNEL); \
9f6c9258
DK
5921 if (x == NULL) \
5922 goto alloc_mem_err; \
9f6c9258 5923 } while (0)
a2fbb9ea 5924
9f6c9258 5925 int i;
a2fbb9ea 5926
9f6c9258
DK
5927 /* fastpath */
5928 /* Common */
a2fbb9ea 5929 for_each_queue(bp, i) {
f2e0899f 5930 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
9f6c9258 5931 bnx2x_fp(bp, i, bp) = bp;
9f6c9258 5932 /* status blocks */
f2e0899f
DK
5933 if (CHIP_IS_E2(bp))
5934 BNX2X_PCI_ALLOC(sb->e2_sb,
5935 &bnx2x_fp(bp, i, status_blk_mapping),
5936 sizeof(struct host_hc_status_block_e2));
5937 else
5938 BNX2X_PCI_ALLOC(sb->e1x_sb,
9f6c9258 5939 &bnx2x_fp(bp, i, status_blk_mapping),
523224a3
DK
5940 sizeof(struct host_hc_status_block_e1x));
5941
f2e0899f 5942 set_sb_shortcuts(bp, i);
a2fbb9ea 5943 }
9f6c9258
DK
5944 /* Rx */
5945 for_each_queue(bp, i) {
a2fbb9ea 5946
9f6c9258
DK
5947 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5948 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5949 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5950 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5951 &bnx2x_fp(bp, i, rx_desc_mapping),
5952 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 5953
9f6c9258
DK
5954 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5955 &bnx2x_fp(bp, i, rx_comp_mapping),
5956 sizeof(struct eth_fast_path_rx_cqe) *
5957 NUM_RCQ_BD);
a2fbb9ea 5958
9f6c9258
DK
5959 /* SGE ring */
5960 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5961 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5962 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5963 &bnx2x_fp(bp, i, rx_sge_mapping),
5964 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5965 }
5966 /* Tx */
5967 for_each_queue(bp, i) {
8badd27a 5968
9f6c9258
DK
5969 /* fastpath tx rings: tx_buf tx_desc */
5970 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5971 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5972 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5973 &bnx2x_fp(bp, i, tx_desc_mapping),
5974 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 5975 }
9f6c9258 5976 /* end of fastpath */
8badd27a 5977
523224a3 5978#ifdef BCM_CNIC
f2e0899f
DK
5979 if (CHIP_IS_E2(bp))
5980 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5981 sizeof(struct host_hc_status_block_e2));
5982 else
5983 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5984 sizeof(struct host_hc_status_block_e1x));
8badd27a 5985
523224a3
DK
5986 /* allocate searcher T2 table */
5987 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5988#endif
a2fbb9ea 5989
8badd27a 5990
523224a3
DK
5991 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5992 sizeof(struct host_sp_status_block));
a2fbb9ea 5993
523224a3
DK
5994 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5995 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5996
523224a3 5997 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
f85582f8 5998
523224a3
DK
5999 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6000 bp->context.size);
65abd74d 6001
523224a3 6002 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 6003
523224a3
DK
6004 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6005 goto alloc_mem_err;
65abd74d 6006
9f6c9258
DK
6007 /* Slow path ring */
6008 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 6009
523224a3
DK
6010 /* EQ */
6011 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6012 BCM_PAGE_SIZE * NUM_EQ_PAGES);
9f6c9258 6013 return 0;
e1510706 6014
9f6c9258
DK
6015alloc_mem_err:
6016 bnx2x_free_mem(bp);
6017 return -ENOMEM;
e1510706 6018
9f6c9258
DK
6019#undef BNX2X_PCI_ALLOC
6020#undef BNX2X_ALLOC
65abd74d
YG
6021}
6022
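
Both allocation macros above jump to alloc_mem_err on the first failure, where a single bnx2x_free_mem() call unwinds whatever was already allocated; that is safe because the free-side macros NULL-check and re-NULL every pointer. A self-contained sketch of the same unwind pattern outside the macros; the struct and helper names are illustrative, not driver API:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct example_mem {
	void *ring;		/* DMA-coherent ring */
	dma_addr_t ring_map;
	void *table;		/* plain kernel memory */
};

static void example_free(struct device *dev, struct example_mem *m)
{
	if (m->ring)		/* safe on partially-built state */
		dma_free_coherent(dev, PAGE_SIZE, m->ring, m->ring_map);
	m->ring = NULL;
	kfree(m->table);	/* kfree(NULL) is a no-op */
	m->table = NULL;
}

static int example_alloc(struct device *dev, struct example_mem *m)
{
	m->ring = dma_alloc_coherent(dev, PAGE_SIZE, &m->ring_map,
				     GFP_KERNEL);
	if (!m->ring)
		goto err;
	m->table = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!m->table)
		goto err;
	return 0;
err:
	example_free(dev, m);	/* one exit path frees everything */
	return -ENOMEM;
}
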
a2fbb9ea
ET
6023/*
6024 * Init service functions
6025 */
523224a3 6026int bnx2x_func_start(struct bnx2x *bp)
a2fbb9ea 6027{
523224a3 6028 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
a2fbb9ea 6029
523224a3
DK
6030 /* Wait for completion */
6031 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6032 WAIT_RAMROD_COMMON);
6033}
a2fbb9ea 6034
523224a3
DK
6035int bnx2x_func_stop(struct bnx2x *bp)
6036{
6037 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
a2fbb9ea 6038
523224a3
DK
6039 /* Wait for completion */
6040 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6041 0, &(bp->state), WAIT_RAMROD_COMMON);
a2fbb9ea
ET
6042}
6043
e665bfda 6044/**
f85582f8 6045 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
e665bfda
MC
6046 *
6047 * @param bp driver descriptor
6048 * @param set set or clear an entry (1 or 0)
6049 * @param mac pointer to a buffer containing a MAC
6050 * @param cl_bit_vec bit vector of clients to register a MAC for
6051 * @param cam_offset offset in a CAM to use
523224a3 6052 * @param is_bcast is the set MAC a broadcast address (for E1 only)
e665bfda 6053 */
523224a3 6054static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
f85582f8
DK
6055 u32 cl_bit_vec, u8 cam_offset,
6056 u8 is_bcast)
34f80b04 6057{
523224a3
DK
6058 struct mac_configuration_cmd *config =
6059 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6060 int ramrod_flags = WAIT_RAMROD_COMMON;
6061
6062 bp->set_mac_pending = 1;
6063 smp_wmb();
6064
8d9c5f34 6065 config->hdr.length = 1;
e665bfda
MC
6066 config->hdr.offset = cam_offset;
6067 config->hdr.client_id = 0xff;
34f80b04
EG
6068 config->hdr.reserved1 = 0;
6069
6070 /* primary MAC */
6071 config->config_table[0].msb_mac_addr =
e665bfda 6072 swab16(*(u16 *)&mac[0]);
34f80b04 6073 config->config_table[0].middle_mac_addr =
e665bfda 6074 swab16(*(u16 *)&mac[2]);
34f80b04 6075 config->config_table[0].lsb_mac_addr =
e665bfda 6076 swab16(*(u16 *)&mac[4]);
ca00392c 6077 config->config_table[0].clients_bit_vector =
e665bfda 6078 cpu_to_le32(cl_bit_vec);
34f80b04 6079 config->config_table[0].vlan_id = 0;
523224a3 6080 config->config_table[0].pf_id = BP_FUNC(bp);
3101c2bc 6081 if (set)
523224a3
DK
6082 SET_FLAG(config->config_table[0].flags,
6083 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6084 T_ETH_MAC_COMMAND_SET);
3101c2bc 6085 else
523224a3
DK
6086 SET_FLAG(config->config_table[0].flags,
6087 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6088 T_ETH_MAC_COMMAND_INVALIDATE);
34f80b04 6089
523224a3
DK
6090 if (is_bcast)
6091 SET_FLAG(config->config_table[0].flags,
6092 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6093
6094 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
3101c2bc 6095 (set ? "setting" : "clearing"),
34f80b04
EG
6096 config->config_table[0].msb_mac_addr,
6097 config->config_table[0].middle_mac_addr,
523224a3 6098 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
34f80b04 6099
523224a3 6100 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
34f80b04 6101 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
523224a3
DK
6102 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6103
6104 /* Wait for a completion */
6105 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
34f80b04
EG
6106}
6107
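
The swab16(*(u16 *)&mac[n]) triplet used by bnx2x_set_mac_addr_gen() packs the six MAC bytes into three 16-bit words, most-significant byte first, as the CAM entry expects. A worked example for 00:11:22:33:44:55 on a little-endian host (the case the cast-then-swap trick targets):

#include <linux/types.h>
#include <linux/swab.h>

static void example_mac_pack(void)
{
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u16 msb, middle, lsb;

	/* Little-endian load of mac[0..1] yields 0x1100; swab16 flips it
	 * so the first wire byte lands in the high half of the word.
	 */
	msb    = swab16(*(u16 *)&mac[0]);	/* 0x0011 */
	middle = swab16(*(u16 *)&mac[2]);	/* 0x2233 */
	lsb    = swab16(*(u16 *)&mac[4]);	/* 0x4455 */
	(void)msb; (void)middle; (void)lsb;
}
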
523224a3 6108int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
f85582f8 6109 int *state_p, int flags)
a2fbb9ea
ET
6110{
6111 /* can take a while if any port is running */
8b3a0f0b 6112 int cnt = 5000;
523224a3
DK
6113 u8 poll = flags & WAIT_RAMROD_POLL;
6114 u8 common = flags & WAIT_RAMROD_COMMON;
a2fbb9ea 6115
c14423fe
ET
6116 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6117 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6118
6119 might_sleep();
34f80b04 6120 while (cnt--) {
a2fbb9ea 6121 if (poll) {
523224a3
DK
6122 if (common)
6123 bnx2x_eq_int(bp);
6124 else {
6125 bnx2x_rx_int(bp->fp, 10);
6126 /* if index is different from 0
6127 * the reply for some commands will
6128 * be on the non default queue
6129 */
6130 if (idx)
6131 bnx2x_rx_int(&bp->fp[idx], 10);
6132 }
a2fbb9ea 6133 }
a2fbb9ea 6134
3101c2bc 6135 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6136 if (*state_p == state) {
6137#ifdef BNX2X_STOP_ON_ERROR
6138 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6139#endif
a2fbb9ea 6140 return 0;
8b3a0f0b 6141 }
a2fbb9ea 6142
a2fbb9ea 6143 msleep(1);
e3553b29
EG
6144
6145 if (bp->panic)
6146 return -EIO;
a2fbb9ea
ET
6147 }
6148
a2fbb9ea 6149 /* timeout! */
49d66772
ET
6150 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6151 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6152#ifdef BNX2X_STOP_ON_ERROR
6153 bnx2x_panic();
6154#endif
a2fbb9ea 6155
49d66772 6156 return -EBUSY;
a2fbb9ea
ET
6157}
6158
523224a3 6159u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
e665bfda 6160{
f2e0899f
DK
6161 if (CHIP_IS_E1H(bp))
6162 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6163 else if (CHIP_MODE_IS_4_PORT(bp))
6164 return BP_FUNC(bp) * 32 + rel_offset;
6165 else
6166 return BP_VN(bp) * 32 + rel_offset;
523224a3
DK
6167}
6168
6169void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6170{
6171 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6172 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
e665bfda 6173
523224a3
DK
6174 /* networking MAC */
6175 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6176 (1 << bp->fp->cl_id), cam_offset , 0);
e665bfda 6177
523224a3
DK
6178 if (CHIP_IS_E1(bp)) {
6179 /* broadcast MAC */
6180 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6181 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6182 }
e665bfda 6183}
523224a3
DK
6184static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6185{
6186 int i = 0, old;
6187 struct net_device *dev = bp->dev;
6188 struct netdev_hw_addr *ha;
6189 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6190 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6191
6192 netdev_for_each_mc_addr(ha, dev) {
6193 /* copy mac */
6194 config_cmd->config_table[i].msb_mac_addr =
6195 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6196 config_cmd->config_table[i].middle_mac_addr =
6197 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6198 config_cmd->config_table[i].lsb_mac_addr =
6199 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
e665bfda 6200
523224a3
DK
6201 config_cmd->config_table[i].vlan_id = 0;
6202 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6203 config_cmd->config_table[i].clients_bit_vector =
6204 cpu_to_le32(1 << BP_L_ID(bp));
6205
6206 SET_FLAG(config_cmd->config_table[i].flags,
6207 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6208 T_ETH_MAC_COMMAND_SET);
6209
6210 DP(NETIF_MSG_IFUP,
6211 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6212 config_cmd->config_table[i].msb_mac_addr,
6213 config_cmd->config_table[i].middle_mac_addr,
6214 config_cmd->config_table[i].lsb_mac_addr);
6215 i++;
6216 }
6217 old = config_cmd->hdr.length;
6218 if (old > i) {
6219 for (; i < old; i++) {
6220 if (CAM_IS_INVALID(config_cmd->
6221 config_table[i])) {
6222 /* already invalidated */
6223 break;
6224 }
6225 /* invalidate */
6226 SET_FLAG(config_cmd->config_table[i].flags,
6227 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6228 T_ETH_MAC_COMMAND_INVALIDATE);
6229 }
6230 }
6231
6232 config_cmd->hdr.length = i;
6233 config_cmd->hdr.offset = offset;
6234 config_cmd->hdr.client_id = 0xff;
6235 config_cmd->hdr.reserved1 = 0;
6236
6237 bp->set_mac_pending = 1;
6238 smp_wmb();
6239
6240 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6241 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6242}
6243static void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
e665bfda 6244{
523224a3
DK
6245 int i;
6246 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6247 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6248 int ramrod_flags = WAIT_RAMROD_COMMON;
6249
6250 bp->set_mac_pending = 1;
e665bfda
MC
6251 smp_wmb();
6252
523224a3
DK
6253 for (i = 0; i < config_cmd->hdr.length; i++)
6254 SET_FLAG(config_cmd->config_table[i].flags,
6255 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6256 T_ETH_MAC_COMMAND_INVALIDATE);
6257
6258 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6259 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
e665bfda
MC
6260
6261 /* Wait for a completion */
523224a3
DK
6262 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6263 ramrod_flags);
6264
e665bfda
MC
6265}
6266
993ac7b5
MC
6267#ifdef BCM_CNIC
6268/**
6269 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6270 * MAC(s). This function will wait until the ramrod completion
6271 * returns.
6272 *
6273 * @param bp driver handle
6274 * @param set set or clear the CAM entry
6275 *
6276 * @return 0 on success, -ENODEV if the ramrod completion doesn't arrive.
6277 */
9f6c9258 6278int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 6279{
523224a3
DK
6280 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6281 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6282 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6283 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
993ac7b5
MC
6284
6285 /* Send a SET_MAC ramrod */
523224a3
DK
6286 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6287 cam_offset, 0);
993ac7b5
MC
6288 return 0;
6289}
6290#endif
6291
523224a3
DK
6292static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6293 struct bnx2x_client_init_params *params,
6294 u8 activate,
6295 struct client_init_ramrod_data *data)
6296{
6297 /* Clear the buffer */
6298 memset(data, 0, sizeof(*data));
6299
6300 /* general */
6301 data->general.client_id = params->rxq_params.cl_id;
6302 data->general.statistics_counter_id = params->rxq_params.stat_id;
6303 data->general.statistics_en_flg =
6304 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6305 data->general.activate_flg = activate;
6306 data->general.sp_client_id = params->rxq_params.spcl_id;
6307
6308 /* Rx data */
6309 data->rx.tpa_en_flg =
6310 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6311 data->rx.vmqueue_mode_en_flg = 0;
6312 data->rx.cache_line_alignment_log_size =
6313 params->rxq_params.cache_line_log;
6314 data->rx.enable_dynamic_hc =
6315 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6316 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6317 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6318 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6319
6320 /* We don't set drop flags */
6321 data->rx.drop_ip_cs_err_flg = 0;
6322 data->rx.drop_tcp_cs_err_flg = 0;
6323 data->rx.drop_ttl0_flg = 0;
6324 data->rx.drop_udp_cs_err_flg = 0;
6325
6326 data->rx.inner_vlan_removal_enable_flg =
6327 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6328 data->rx.outer_vlan_removal_enable_flg =
6329 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6330 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6331 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6332 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6333 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6334 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6335 data->rx.bd_page_base.lo =
6336 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6337 data->rx.bd_page_base.hi =
6338 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6339 data->rx.sge_page_base.lo =
6340 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6341 data->rx.sge_page_base.hi =
6342 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6343 data->rx.cqe_page_base.lo =
6344 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6345 data->rx.cqe_page_base.hi =
6346 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6347 data->rx.is_leading_rss =
6348 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6349 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6350
6351 /* Tx data */
6352 data->tx.enforce_security_flg = 0; /* VF specific */
6353 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6354 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6355 data->tx.mtu = 0; /* VF specific */
6356 data->tx.tx_bd_page_base.lo =
6357 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6358 data->tx.tx_bd_page_base.hi =
6359 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6360
6361 /* flow control data */
6362 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6363 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6364 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6365 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6366 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6367 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6368 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6369
6370 data->fc.safc_group_num = params->txq_params.cos;
6371 data->fc.safc_group_en_flg =
6372 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6373 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6374}
6375
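
bnx2x_fill_cl_init_data() repeatedly splits a 64-bit DMA address into the lo/hi little-endian word pair the firmware structures use. A one-off worked example of that split, assuming a 64-bit dma_addr_t and that the driver's U64_LO()/U64_HI() macros take the low and high 32 bits respectively:

#include <linux/types.h>

static void example_addr_split(void)
{
	dma_addr_t map = 0x123456789ULL;	/* example 64-bit address */
	__le32 lo, hi;

	lo = cpu_to_le32(U64_LO(map));		/* 0x23456789 */
	hi = cpu_to_le32(U64_HI(map));		/* 0x00000001 */
	(void)lo; (void)hi;
}
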
6376static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6377{
6378 /* ustorm cxt validation */
6379 cxt->ustorm_ag_context.cdu_usage =
6380 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6381 ETH_CONNECTION_TYPE);
6382 /* xcontext validation */
6383 cxt->xstorm_ag_context.cdu_reserved =
6384 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6385 ETH_CONNECTION_TYPE);
6386}
6387
6388int bnx2x_setup_fw_client(struct bnx2x *bp,
6389 struct bnx2x_client_init_params *params,
6390 u8 activate,
6391 struct client_init_ramrod_data *data,
6392 dma_addr_t data_mapping)
6393{
6394 u16 hc_usec;
6395 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6396 int ramrod_flags = 0, rc;
6397
6398 /* HC and context validation values */
6399 hc_usec = params->txq_params.hc_rate ?
6400 1000000 / params->txq_params.hc_rate : 0;
6401 bnx2x_update_coalesce_sb_index(bp,
6402 params->txq_params.fw_sb_id,
6403 params->txq_params.sb_cq_index,
6404 !(params->txq_params.flags & QUEUE_FLG_HC),
6405 hc_usec);
6406
6407 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6408
6409 hc_usec = params->rxq_params.hc_rate ?
6410 1000000 / params->rxq_params.hc_rate : 0;
6411 bnx2x_update_coalesce_sb_index(bp,
6412 params->rxq_params.fw_sb_id,
6413 params->rxq_params.sb_cq_index,
6414 !(params->rxq_params.flags & QUEUE_FLG_HC),
6415 hc_usec);
6416
6417 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6418 params->rxq_params.cid);
6419
6420 /* zero stats */
6421 if (params->txq_params.flags & QUEUE_FLG_STATS)
6422 storm_memset_xstats_zero(bp, BP_PORT(bp),
6423 params->txq_params.stat_id);
6424
6425 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6426 storm_memset_ustats_zero(bp, BP_PORT(bp),
6427 params->rxq_params.stat_id);
6428 storm_memset_tstats_zero(bp, BP_PORT(bp),
6429 params->rxq_params.stat_id);
6430 }
6431
6432 /* Fill the ramrod data */
6433 bnx2x_fill_cl_init_data(bp, params, activate, data);
6434
6435 /* SETUP ramrod.
6436 *
6437 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6438 * barrier other than mmiowb() is needed to impose a
6439 * proper ordering of memory operations.
6440 */
6441 mmiowb();
a2fbb9ea 6442
a2fbb9ea 6443
523224a3
DK
6444 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6445 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6446
34f80b04 6447 /* Wait for completion */
523224a3
DK
6448 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6449 params->ramrod_params.index,
6450 params->ramrod_params.pstate,
6451 ramrod_flags);
34f80b04 6452 return rc;
a2fbb9ea
ET
6453}
6454
d6214d7a
DK
6455/**
6456 * Configure interrupt mode according to current configuration.
6457 * In case of MSI-X it will also try to enable MSI-X.
6458 *
6459 * @param bp
6460 *
6461 * @return int
6462 */
6463static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 6464{
d6214d7a 6465 int rc = 0;
ca00392c 6466
d6214d7a
DK
6467 switch (bp->int_mode) {
6468 case INT_MODE_MSI:
6469 bnx2x_enable_msi(bp);
6470 /* falling through... */
6471 case INT_MODE_INTx:
54b9ddaa 6472 bp->num_queues = 1;
d6214d7a 6473 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 6474 break;
d6214d7a
DK
6475 default:
6476 /* Set number of queues according to bp->multi_mode value */
6477 bnx2x_set_num_queues(bp);
ca00392c 6478
d6214d7a
DK
6479 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6480 bp->num_queues);
ca00392c 6481
d6214d7a
DK
6482 /* if we can't use MSI-X we only need one fp,
6483 * so try to enable MSI-X with the requested number of fp's
6484 * and fallback to MSI or legacy INTx with one fp
6485 */
6486 rc = bnx2x_enable_msix(bp);
6487 if (rc) {
6488 /* failed to enable MSI-X */
6489 if (bp->multi_mode)
6490 DP(NETIF_MSG_IFUP,
6491 "Multi requested but failed to "
6492 "enable MSI-X (%d), "
6493 "set number of queues to %d\n",
6494 bp->num_queues,
6495 1);
6496 bp->num_queues = 1;
6497
6498 if (!(bp->flags & DISABLE_MSI_FLAG))
6499 bnx2x_enable_msi(bp);
6500 }
ca00392c 6501
9f6c9258
DK
6502 break;
6503 }
d6214d7a
DK
6504
6505 return rc;
a2fbb9ea
ET
6506}
6507
c2bff63f
DK
6508/* must be called prior to any HW initializations */
6509static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6510{
6511 return L2_ILT_LINES(bp);
6512}
6513
523224a3
DK
6514void bnx2x_ilt_set_info(struct bnx2x *bp)
6515{
6516 struct ilt_client_info *ilt_client;
6517 struct bnx2x_ilt *ilt = BP_ILT(bp);
6518 u16 line = 0;
6519
6520 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6521 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6522
6523 /* CDU */
6524 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6525 ilt_client->client_num = ILT_CLIENT_CDU;
6526 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6527 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6528 ilt_client->start = line;
6529 line += L2_ILT_LINES(bp);
6530#ifdef BCM_CNIC
6531 line += CNIC_ILT_LINES;
6532#endif
6533 ilt_client->end = line - 1;
6534
6535 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6536 "flags 0x%x, hw psz %d\n",
6537 ilt_client->start,
6538 ilt_client->end,
6539 ilt_client->page_size,
6540 ilt_client->flags,
6541 ilog2(ilt_client->page_size >> 12));
6542
6543 /* QM */
6544 if (QM_INIT(bp->qm_cid_count)) {
6545 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6546 ilt_client->client_num = ILT_CLIENT_QM;
6547 ilt_client->page_size = QM_ILT_PAGE_SZ;
6548 ilt_client->flags = 0;
6549 ilt_client->start = line;
6550
6551 /* 4 bytes for each cid */
6552 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6553 QM_ILT_PAGE_SZ);
6554
6555 ilt_client->end = line - 1;
6556
6557 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6558 "flags 0x%x, hw psz %d\n",
6559 ilt_client->start,
6560 ilt_client->end,
6561 ilt_client->page_size,
6562 ilt_client->flags,
6563 ilog2(ilt_client->page_size >> 12));
6564
6565 }
6566 /* SRC */
6567 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6568#ifdef BCM_CNIC
6569 ilt_client->client_num = ILT_CLIENT_SRC;
6570 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6571 ilt_client->flags = 0;
6572 ilt_client->start = line;
6573 line += SRC_ILT_LINES;
6574 ilt_client->end = line - 1;
6575
6576 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6577 "flags 0x%x, hw psz %d\n",
6578 ilt_client->start,
6579 ilt_client->end,
6580 ilt_client->page_size,
6581 ilt_client->flags,
6582 ilog2(ilt_client->page_size >> 12));
6583
6584#else
6585 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6586#endif
9f6c9258 6587
523224a3
DK
6588 /* TM */
6589 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6590#ifdef BCM_CNIC
6591 ilt_client->client_num = ILT_CLIENT_TM;
6592 ilt_client->page_size = TM_ILT_PAGE_SZ;
6593 ilt_client->flags = 0;
6594 ilt_client->start = line;
6595 line += TM_ILT_LINES;
6596 ilt_client->end = line - 1;
6597
6598 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6599 "flags 0x%x, hw psz %d\n",
6600 ilt_client->start,
6601 ilt_client->end,
6602 ilt_client->page_size,
6603 ilt_client->flags,
6604 ilog2(ilt_client->page_size >> 12));
9f6c9258 6605
523224a3
DK
6606#else
6607 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6608#endif
6609}
f85582f8 6610
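
Each ILT client above is carved out as a consecutive range of lines, and the QM range is sized from the connection count at 4 bytes per cid. A worked example of that sizing with purely hypothetical numbers (the real constants come from the driver headers): qm_cid_count = 1024, QM_QUEUES_PER_FUNC = 16, QM_ILT_PAGE_SZ = 4096:

#include <linux/types.h>
#include <linux/kernel.h>	/* DIV_ROUND_UP */

/* bytes = 1024 cids * 16 queues * 4 bytes/cid = 65536
 * lines = DIV_ROUND_UP(65536, 4096)           = 16 ILT lines for QM
 */
static const u16 example_qm_lines = DIV_ROUND_UP(1024 * 16 * 4, 4096);
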
523224a3
DK
6611int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6612 int is_leading)
a2fbb9ea 6613{
523224a3 6614 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6615 int rc;
6616
523224a3
DK
6617 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6618 IGU_INT_ENABLE, 0);
a2fbb9ea 6619
523224a3
DK
6620 params.ramrod_params.pstate = &fp->state;
6621 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6622 params.ramrod_params.index = fp->index;
6623 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6624
523224a3
DK
6625 if (is_leading)
6626 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6627
523224a3
DK
6628 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6629
6630 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6631
6632 rc = bnx2x_setup_fw_client(bp, &params, 1,
6633 bnx2x_sp(bp, client_init_data),
6634 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6635 return rc;
a2fbb9ea
ET
6636}
6637
523224a3 6638int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6639{
34f80b04 6640 int rc;
a2fbb9ea 6641
523224a3 6642 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6643
523224a3
DK
6644 /* halt the connection */
6645 *p->pstate = BNX2X_FP_STATE_HALTING;
6646 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6647 p->cl_id, 0);
a2fbb9ea 6648
34f80b04 6649 /* Wait for completion */
523224a3
DK
6650 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6651 p->pstate, poll_flag);
34f80b04 6652 if (rc) /* timeout */
da5a662a 6653 return rc;
a2fbb9ea 6654
523224a3
DK
6655 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6656 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6657 p->cl_id, 0);
6658 /* Wait for completion */
6659 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6660 p->pstate, poll_flag);
6661 if (rc) /* timeout */
6662 return rc;
a2fbb9ea 6663
a2fbb9ea 6664
523224a3
DK
6665 /* delete cfc entry */
6666 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6667
523224a3
DK
6668 /* Wait for completion */
6669 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6670 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6671 return rc;
a2fbb9ea
ET
6672}
6673
523224a3
DK
6674static int bnx2x_stop_client(struct bnx2x *bp, int index)
6675{
6676 struct bnx2x_client_ramrod_params client_stop = {0};
6677 struct bnx2x_fastpath *fp = &bp->fp[index];
6678
6679 client_stop.index = index;
6680 client_stop.cid = fp->cid;
6681 client_stop.cl_id = fp->cl_id;
6682 client_stop.pstate = &(fp->state);
6683 client_stop.poll = 0;
6684
6685 return bnx2x_stop_fw_client(bp, &client_stop);
6686}
6687
6688
34f80b04
EG
6689static void bnx2x_reset_func(struct bnx2x *bp)
6690{
6691 int port = BP_PORT(bp);
6692 int func = BP_FUNC(bp);
f2e0899f 6693 int i;
523224a3 6694 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6695 (CHIP_IS_E2(bp) ?
6696 offsetof(struct hc_status_block_data_e2, common) :
6697 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6698 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6699 int pfid_offset = offsetof(struct pci_entity, pf_id);
6700
6701 /* Disable the function in the FW */
6702 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6703 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6704 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6705 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6706
6707 /* FP SBs */
6708 for_each_queue(bp, i) {
6709 struct bnx2x_fastpath *fp = &bp->fp[i];
6710 REG_WR8(bp,
6711 BAR_CSTRORM_INTMEM +
6712 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6713 + pfunc_offset_fp + pfid_offset,
6714 HC_FUNCTION_DISABLED);
6715 }
6716
6717 /* SP SB */
6718 REG_WR8(bp,
6719 BAR_CSTRORM_INTMEM +
6720 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6721 pfunc_offset_sp + pfid_offset,
6722 HC_FUNCTION_DISABLED);
6723
6724
6725 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6726 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) +
6727 i*4, 0);
34f80b04
EG
6728
6729 /* Configure IGU */
f2e0899f
DK
6730 if (bp->common.int_block == INT_BLOCK_HC) {
6731 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6732 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6733 } else {
6734 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6735 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6736 }
34f80b04 6737
37b091ba
MC
6738#ifdef BCM_CNIC
6739 /* Disable Timer scan */
6740 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6741 /*
6742 * Wait for at least 10ms and up to 2 second for the timers scan to
6743 * complete
6744 */
6745 for (i = 0; i < 200; i++) {
6746 msleep(10);
6747 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6748 break;
6749 }
6750#endif
34f80b04 6751 /* Clear ILT */
f2e0899f
DK
6752 bnx2x_clear_func_ilt(bp, func);
6753
6754	/* Timers workaround for an E2 bug: if this is vnic-3,
6755	 * we need to set the entire ILT range for the timers.
6756 */
6757 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6758 struct ilt_client_info ilt_cli;
6759 /* use dummy TM client */
6760 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6761 ilt_cli.start = 0;
6762 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6763 ilt_cli.client_num = ILT_CLIENT_TM;
6764
6765 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6766 }
6767
6768	/* this assumes that reset_port() is called before reset_func() */
6769 if (CHIP_IS_E2(bp))
6770 bnx2x_pf_disable(bp);
523224a3
DK
6771
6772 bp->dmae_ready = 0;
34f80b04
EG
6773}
6774
6775static void bnx2x_reset_port(struct bnx2x *bp)
6776{
6777 int port = BP_PORT(bp);
6778 u32 val;
6779
6780 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6781
6782 /* Do not rcv packets to BRB */
6783 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6784 /* Do not direct rcv packets that are not for MCP to the BRB */
6785 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6786 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6787
6788 /* Configure AEU */
6789 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6790
6791 msleep(100);
6792 /* Check for BRB port occupancy */
6793 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6794 if (val)
6795 DP(NETIF_MSG_IFDOWN,
33471629 6796		   "BRB1 is not empty, %d blocks are occupied\n", val);
34f80b04
EG
6797
6798 /* TODO: Close Doorbell port? */
6799}
6800
34f80b04
EG
6801static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6802{
6803 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 6804 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
6805
6806 switch (reset_code) {
6807 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6808 bnx2x_reset_port(bp);
6809 bnx2x_reset_func(bp);
6810 bnx2x_reset_common(bp);
6811 break;
6812
6813 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6814 bnx2x_reset_port(bp);
6815 bnx2x_reset_func(bp);
6816 break;
6817
6818 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6819 bnx2x_reset_func(bp);
6820 break;
49d66772 6821
34f80b04
EG
6822 default:
6823 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6824 break;
6825 }
6826}
6827
9f6c9258 6828void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6829{
da5a662a 6830 int port = BP_PORT(bp);
a2fbb9ea 6831 u32 reset_code = 0;
da5a662a 6832 int i, cnt, rc;
a2fbb9ea 6833
555f6c78 6834 /* Wait until tx fastpath tasks complete */
54b9ddaa 6835 for_each_queue(bp, i) {
228241eb
ET
6836 struct bnx2x_fastpath *fp = &bp->fp[i];
6837
34f80b04 6838 cnt = 1000;
e8b5fc51 6839 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6840
34f80b04
EG
6841 if (!cnt) {
6842 BNX2X_ERR("timeout waiting for queue[%d]\n",
6843 i);
6844#ifdef BNX2X_STOP_ON_ERROR
6845 bnx2x_panic();
6846 return -EBUSY;
6847#else
6848 break;
6849#endif
6850 }
6851 cnt--;
da5a662a 6852 msleep(1);
34f80b04 6853 }
228241eb 6854 }
da5a662a
VZ
6855 /* Give HW time to discard old tx messages */
6856 msleep(1);
a2fbb9ea 6857
3101c2bc 6858 if (CHIP_IS_E1(bp)) {
523224a3
DK
6859 /* invalidate mc list,
6860 * wait and poll (interrupts are off)
6861 */
6862 bnx2x_invlidate_e1_mc_list(bp);
6863 bnx2x_set_eth_mac(bp, 0);
3101c2bc 6864
523224a3 6865 } else {
65abd74d
YG
6866 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6867
523224a3 6868 bnx2x_set_eth_mac(bp, 0);
3101c2bc
YG
6869
6870 for (i = 0; i < MC_HASH_SIZE; i++)
6871 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6872 }
523224a3 6873
993ac7b5
MC
6874#ifdef BCM_CNIC
6875 /* Clear iSCSI L2 MAC */
6876 mutex_lock(&bp->cnic_mutex);
6877 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6878 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6879 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6880 }
6881 mutex_unlock(&bp->cnic_mutex);
6882#endif
3101c2bc 6883
65abd74d
YG
6884 if (unload_mode == UNLOAD_NORMAL)
6885 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6886
7d0446c2 6887 else if (bp->flags & NO_WOL_FLAG)
65abd74d 6888 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 6889
7d0446c2 6890 else if (bp->wol) {
65abd74d
YG
6891 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6892 u8 *mac_addr = bp->dev->dev_addr;
6893 u32 val;
6894		/* The MAC address is written to entries 1-4 to
6895		   preserve entry 0, which is used by the PMF */
6896 u8 entry = (BP_E1HVN(bp) + 1)*8;
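		/* Each MAC-match entry is two 32-bit registers (8 bytes):
		 * the upper 16 bits of the MAC go into the first word and
		 * the lower 32 bits into the second */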
6897
6898 val = (mac_addr[0] << 8) | mac_addr[1];
6899 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6900
6901 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6902 (mac_addr[4] << 8) | mac_addr[5];
6903 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6904
6905 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6906
6907 } else
6908 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6909
34f80b04
EG
6910	/* Close multi and leading connections.
6911	   Completions for ramrods are collected in a synchronous way */
523224a3
DK
6912 for_each_queue(bp, i)
6913
6914 if (bnx2x_stop_client(bp, i))
6915#ifdef BNX2X_STOP_ON_ERROR
6916 return;
6917#else
228241eb 6918 goto unload_error;
523224a3 6919#endif
a2fbb9ea 6920
523224a3 6921 rc = bnx2x_func_stop(bp);
da5a662a 6922 if (rc) {
523224a3 6923 BNX2X_ERR("Function stop failed!\n");
da5a662a 6924#ifdef BNX2X_STOP_ON_ERROR
523224a3 6925 return;
da5a662a
VZ
6926#else
6927 goto unload_error;
34f80b04 6928#endif
228241eb 6929 }
523224a3 6930#ifndef BNX2X_STOP_ON_ERROR
228241eb 6931unload_error:
523224a3 6932#endif
34f80b04 6933 if (!BP_NOMCP(bp))
a22f0788 6934 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 6935 else {
f2e0899f
DK
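		/* No MCP to arbitrate the unload: use the per-path load
		 * counters to decide the reset scope. The last function on
		 * the chip resets COMMON, the last one on the port resets
		 * the PORT, otherwise only the FUNCTION is reset. */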
6936 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6937 "%d, %d, %d\n", BP_PATH(bp),
6938 load_count[BP_PATH(bp)][0],
6939 load_count[BP_PATH(bp)][1],
6940 load_count[BP_PATH(bp)][2]);
6941 load_count[BP_PATH(bp)][0]--;
6942 load_count[BP_PATH(bp)][1 + port]--;
6943 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6944 "%d, %d, %d\n", BP_PATH(bp),
6945 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6946 load_count[BP_PATH(bp)][2]);
6947 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 6948 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 6949 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
6950 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6951 else
6952 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6953 }
a2fbb9ea 6954
34f80b04
EG
6955 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6956 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6957 bnx2x__link_reset(bp);
a2fbb9ea 6958
523224a3
DK
6959 /* Disable HW interrupts, NAPI */
6960 bnx2x_netif_stop(bp, 1);
6961
6962 /* Release IRQs */
d6214d7a 6963 bnx2x_free_irq(bp);
523224a3 6964
a2fbb9ea 6965 /* Reset the chip */
228241eb 6966 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6967
6968 /* Report UNLOAD_DONE to MCP */
34f80b04 6969 if (!BP_NOMCP(bp))
a22f0788 6970 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 6971
72fd0718
VZ
6972}
6973
9f6c9258 6974void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
6975{
6976 u32 val;
6977
6978 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6979
6980 if (CHIP_IS_E1(bp)) {
6981 int port = BP_PORT(bp);
6982 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6983 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6984
6985 val = REG_RD(bp, addr);
6986 val &= ~(0x300);
6987 REG_WR(bp, addr, val);
6988 } else if (CHIP_IS_E1H(bp)) {
6989 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6990 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6991 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6992 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6993 }
6994}
6995
72fd0718
VZ
6996/* Close gates #2, #3 and #4: */
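/* Gate #4 discards host doorbells and gate #2 discards internal host
 * writes in the PXP (both "not E1" only); closing gate #3 clears the
 * low bit of the per-port HC config register, masking interrupt
 * handling.
 */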
6997static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6998{
6999 u32 val, addr;
7000
7001 /* Gates #2 and #4a are closed/opened for "not E1" only */
7002 if (!CHIP_IS_E1(bp)) {
7003 /* #4 */
7004 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7005 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7006 close ? (val | 0x1) : (val & (~(u32)1)));
7007 /* #2 */
7008 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7009 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7010 close ? (val | 0x1) : (val & (~(u32)1)));
7011 }
7012
7013 /* #3 */
7014 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7015 val = REG_RD(bp, addr);
7016 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7017
7018 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7019 close ? "closing" : "opening");
7020 mmiowb();
7021}
7022
7023#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7024
7025static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7026{
7027	/* Save the `magic' bit and set it so the MF config survives the MCP reset */
7028 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7029 *magic_val = val & SHARED_MF_CLP_MAGIC;
7030 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7031}
7032
7033/* Restore the value of the `magic' bit.
7034 *
7035 * @param bp Driver handle.
7036 * @param magic_val Old value of the `magic' bit.
7037 */
7038static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7039{
7040 /* Restore the `magic' bit value... */
72fd0718
VZ
7041 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7042 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7043 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7044}
7045
f85582f8
DK
7046/**
7047 * Prepares for MCP reset: takes care of CLP configurations.
72fd0718
VZ
7048 *
7049 * @param bp
7050 * @param magic_val Old value of 'magic' bit.
7051 */
7052static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7053{
7054 u32 shmem;
7055 u32 validity_offset;
7056
7057 DP(NETIF_MSG_HW, "Starting\n");
7058
7059 /* Set `magic' bit in order to save MF config */
7060 if (!CHIP_IS_E1(bp))
7061 bnx2x_clp_reset_prep(bp, magic_val);
7062
7063 /* Get shmem offset */
7064 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7065 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7066
7067 /* Clear validity map flags */
7068 if (shmem > 0)
7069 REG_WR(bp, shmem + validity_offset, 0);
7070}
7071
7072#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7073#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7074
7075/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7076 * depending on the HW type.
7077 *
7078 * @param bp
7079 */
7080static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7081{
7082 /* special handling for emulation and FPGA,
7083 wait 10 times longer */
7084 if (CHIP_REV_IS_SLOW(bp))
7085 msleep(MCP_ONE_TIMEOUT*10);
7086 else
7087 msleep(MCP_ONE_TIMEOUT);
7088}
7089
7090static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7091{
7092 u32 shmem, cnt, validity_offset, val;
7093 int rc = 0;
7094
7095 msleep(100);
7096
7097 /* Get shmem offset */
7098 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7099 if (shmem == 0) {
7100		BNX2X_ERR("Shmem base read returned 0\n");
7101 rc = -ENOTTY;
7102 goto exit_lbl;
7103 }
7104
7105 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7106
7107 /* Wait for MCP to come up */
7108 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7109		/* TBD: it's best to check the validity map of the last port;
7110		 * currently this checks port 0.
7111 */
7112 val = REG_RD(bp, shmem + validity_offset);
7113 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7114 shmem + validity_offset, val);
7115
7116 /* check that shared memory is valid. */
7117 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7118 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7119 break;
7120
7121 bnx2x_mcp_wait_one(bp);
7122 }
7123
7124 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7125
7126 /* Check that shared memory is valid. This indicates that MCP is up. */
7127 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7128 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7129		BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7130 rc = -ENOTTY;
7131 goto exit_lbl;
7132 }
7133
7134exit_lbl:
7135 /* Restore the `magic' bit value */
7136 if (!CHIP_IS_E1(bp))
7137 bnx2x_clp_reset_done(bp, magic_val);
7138
7139 return rc;
7140}
7141
7142static void bnx2x_pxp_prep(struct bnx2x *bp)
7143{
7144 if (!CHIP_IS_E1(bp)) {
7145 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7146 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7147 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7148 mmiowb();
7149 }
7150}
7151
7152/*
7153 * Reset the whole chip except for:
7154 * - PCIE core
7155 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7156 * one reset bit)
7157 * - IGU
7158 * - MISC (including AEU)
7159 * - GRC
7160 * - RBCN, RBCP
7161 */
7162static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7163{
7164 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7165
7166 not_reset_mask1 =
7167 MISC_REGISTERS_RESET_REG_1_RST_HC |
7168 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7169 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7170
7171 not_reset_mask2 =
7172 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7173 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7174 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7175 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7176 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7177 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7178 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7179 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7180
7181 reset_mask1 = 0xffffffff;
7182
7183 if (CHIP_IS_E1(bp))
7184 reset_mask2 = 0xffff;
7185 else
7186 reset_mask2 = 0x1ffff;
7187
7188 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7189 reset_mask1 & (~not_reset_mask1));
7190 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7191 reset_mask2 & (~not_reset_mask2));
7192
7193 barrier();
7194 mmiowb();
7195
7196 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7197 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7198 mmiowb();
7199}
7200
7201static int bnx2x_process_kill(struct bnx2x *bp)
7202{
7203 int cnt = 1000;
7204 u32 val = 0;
7205 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7206
7207
7208 /* Empty the Tetris buffer, wait for 1s */
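	/* The buffer is considered empty when the PSWRD free counters are
	 * back at what are assumed to be their reset values (0x7e SR
	 * credits, 0xa0 block credits), both port read clients report
	 * idle, and no expansion-ROM request is pending (PGL_EXP_ROM2
	 * reads all-ones). */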
7209 do {
7210 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7211 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7212 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7213 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7214 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7215 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7216 ((port_is_idle_0 & 0x1) == 0x1) &&
7217 ((port_is_idle_1 & 0x1) == 0x1) &&
7218 (pgl_exp_rom2 == 0xffffffff))
7219 break;
7220 msleep(1);
7221 } while (cnt-- > 0);
7222
7223 if (cnt <= 0) {
7224		DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7225 " are still"
7226 " outstanding read requests after 1s!\n");
7227 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7228 " port_is_idle_0=0x%08x,"
7229 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7230 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7231 pgl_exp_rom2);
7232 return -EAGAIN;
7233 }
7234
7235 barrier();
7236
7237 /* Close gates #2, #3 and #4 */
7238 bnx2x_set_234_gates(bp, true);
7239
7240 /* TBD: Indicate that "process kill" is in progress to MCP */
7241
7242 /* Clear "unprepared" bit */
7243 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7244 barrier();
7245
7246 /* Make sure all is written to the chip before the reset */
7247 mmiowb();
7248
7249 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7250 * PSWHST, GRC and PSWRD Tetris buffer.
7251 */
7252 msleep(1);
7253
7254	/* Prepare for chip reset: */
7255 /* MCP */
7256 bnx2x_reset_mcp_prep(bp, &val);
7257
7258 /* PXP */
7259 bnx2x_pxp_prep(bp);
7260 barrier();
7261
7262 /* reset the chip */
7263 bnx2x_process_kill_chip_reset(bp);
7264 barrier();
7265
7266 /* Recover after reset: */
7267 /* MCP */
7268 if (bnx2x_reset_mcp_comp(bp, val))
7269 return -EAGAIN;
7270
7271 /* PXP */
7272 bnx2x_pxp_prep(bp);
7273
7274 /* Open the gates #2, #3 and #4 */
7275 bnx2x_set_234_gates(bp, false);
7276
7277	/* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
7278	 * reset state, re-enable attentions. */
7279
a2fbb9ea
ET
7280 return 0;
7281}
7282
72fd0718
VZ
7283static int bnx2x_leader_reset(struct bnx2x *bp)
7284{
7285 int rc = 0;
7286 /* Try to recover after the failure */
7287 if (bnx2x_process_kill(bp)) {
7288		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7289 bp->dev->name);
7290 rc = -EAGAIN;
7291 goto exit_leader_reset;
7292 }
7293
7294 /* Clear "reset is in progress" bit and update the driver state */
7295 bnx2x_set_reset_done(bp);
7296 bp->recovery_state = BNX2X_RECOVERY_DONE;
7297
7298exit_leader_reset:
7299 bp->is_leader = 0;
7300 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7301 smp_wmb();
7302 return rc;
7303}
7304
72fd0718
VZ
7305/* Assumption: runs under rtnl lock. This together with the fact
7306 * that it's called only from bnx2x_reset_task() ensures that it
7307 * will never be called when netif_running(bp->dev) is false.
7308 */
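/* Recovery flow in short: the first function to take the LEADER_LOCK
 * becomes the leader. Every function first unloads itself
 * (RECOVERY_INIT). In RECOVERY_WAIT the leader waits for the global
 * load counter to reach zero and then runs "process kill" via
 * bnx2x_leader_reset(), while a non-leader waits for the "reset done"
 * indication (or takes over leadership if the lock becomes free) and
 * then reloads.
 */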
7309static void bnx2x_parity_recover(struct bnx2x *bp)
7310{
7311 DP(NETIF_MSG_HW, "Handling parity\n");
7312 while (1) {
7313 switch (bp->recovery_state) {
7314 case BNX2X_RECOVERY_INIT:
7315 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7316 /* Try to get a LEADER_LOCK HW lock */
7317 if (bnx2x_trylock_hw_lock(bp,
7318 HW_LOCK_RESOURCE_RESERVED_08))
7319 bp->is_leader = 1;
7320
7321 /* Stop the driver */
7322 /* If interface has been removed - break */
7323 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7324 return;
7325
7326 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7327 /* Ensure "is_leader" and "recovery_state"
7328 * update values are seen on other CPUs
7329 */
7330 smp_wmb();
7331 break;
7332
7333 case BNX2X_RECOVERY_WAIT:
7334 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7335 if (bp->is_leader) {
7336 u32 load_counter = bnx2x_get_load_cnt(bp);
7337 if (load_counter) {
7338 /* Wait until all other functions get
7339 * down.
7340 */
7341 schedule_delayed_work(&bp->reset_task,
7342 HZ/10);
7343 return;
7344 } else {
7345 /* If all other functions got down -
7346 * try to bring the chip back to
7347 * normal. In any case it's an exit
7348 * point for a leader.
7349 */
7350 if (bnx2x_leader_reset(bp) ||
7351 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7352 printk(KERN_ERR"%s: Recovery "
7353 "has failed. Power cycle is "
7354 "needed.\n", bp->dev->name);
7355 /* Disconnect this device */
7356 netif_device_detach(bp->dev);
7357 /* Block ifup for all function
7358 * of this ASIC until
7359 * "process kill" or power
7360 * cycle.
7361 */
7362 bnx2x_set_reset_in_progress(bp);
7363 /* Shut down the power */
7364 bnx2x_set_power_state(bp,
7365 PCI_D3hot);
7366 return;
7367 }
7368
7369 return;
7370 }
7371 } else { /* non-leader */
7372 if (!bnx2x_reset_is_done(bp)) {
7373				/* Try to get a LEADER_LOCK HW lock, since
7374				 * the former leader may have been unloaded
7375				 * by the user or may have released
7376				 * leadership for some other reason.
7378 */
7379 if (bnx2x_trylock_hw_lock(bp,
7380 HW_LOCK_RESOURCE_RESERVED_08)) {
7381 /* I'm a leader now! Restart a
7382 * switch case.
7383 */
7384 bp->is_leader = 1;
7385 break;
7386 }
7387
7388 schedule_delayed_work(&bp->reset_task,
7389 HZ/10);
7390 return;
7391
7392 } else { /* A leader has completed
7393 * the "process kill". It's an exit
7394 * point for a non-leader.
7395 */
7396 bnx2x_nic_load(bp, LOAD_NORMAL);
7397 bp->recovery_state =
7398 BNX2X_RECOVERY_DONE;
7399 smp_wmb();
7400 return;
7401 }
7402 }
7403 default:
7404 return;
7405 }
7406 }
7407}
7408
7409/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7410 * scheduled on a general queue in order to prevent a deadlock.
7411 */
34f80b04
EG
7412static void bnx2x_reset_task(struct work_struct *work)
7413{
72fd0718 7414 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
7415
7416#ifdef BNX2X_STOP_ON_ERROR
7417 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7418 " so reset not done to allow debug dump,\n"
72fd0718 7419 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
7420 return;
7421#endif
7422
7423 rtnl_lock();
7424
7425 if (!netif_running(bp->dev))
7426 goto reset_task_exit;
7427
72fd0718
VZ
7428 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7429 bnx2x_parity_recover(bp);
7430 else {
7431 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7432 bnx2x_nic_load(bp, LOAD_NORMAL);
7433 }
34f80b04
EG
7434
7435reset_task_exit:
7436 rtnl_unlock();
7437}
7438
a2fbb9ea
ET
7439/* end of nic load/unload */
7440
a2fbb9ea
ET
7441/*
7442 * Init service functions
7443 */
7444
f2e0899f
DK
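/* GRC "pretend": writing a function number into this per-function PGL
 * pretend register makes subsequent GRC accesses from this PCI
 * function be served as if they came from that function. It is used
 * below to disable the HC while posing as function 0 during UNDI
 * unload.
 */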
7445u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7446{
7447 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7448 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7449 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
7450}
7451
f2e0899f 7452static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7453{
f2e0899f 7454 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
7455
7456 /* Flush all outstanding writes */
7457 mmiowb();
7458
7459 /* Pretend to be function 0 */
7460 REG_WR(bp, reg, 0);
f2e0899f 7461 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
7462
7463 /* From now we are in the "like-E1" mode */
7464 bnx2x_int_disable(bp);
7465
7466 /* Flush all outstanding writes */
7467 mmiowb();
7468
f2e0899f
DK
7469 /* Restore the original function */
7470 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7471 REG_RD(bp, reg);
f1ef27ef
EG
7472}
7473
f2e0899f 7474static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7475{
f2e0899f 7476 if (CHIP_IS_E1(bp))
f1ef27ef 7477 bnx2x_int_disable(bp);
f2e0899f
DK
7478 else
7479 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
7480}
7481
34f80b04
EG
7482static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7483{
7484 u32 val;
7485
7486 /* Check if there is any driver already loaded */
7487 val = REG_RD(bp, MISC_REG_UNPREPARED);
7488 if (val == 0x1) {
7489		/* Check if it is the UNDI driver:
7490		 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7491 */
4a37fb66 7492 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7493 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7494 if (val == 0x7) {
7495 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
7496 /* save our pf_num */
7497 int orig_pf_num = bp->pf_num;
da5a662a
VZ
7498 u32 swap_en;
7499 u32 swap_val;
34f80b04 7500
b4661739
EG
7501 /* clear the UNDI indication */
7502 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7503
34f80b04
EG
7504 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7505
7506 /* try unload UNDI on port 0 */
f2e0899f 7507 bp->pf_num = 0;
da5a662a 7508 bp->fw_seq =
f2e0899f 7509 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7510 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7511 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7512
7513 /* if UNDI is loaded on the other port */
7514 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7515
da5a662a 7516 /* send "DONE" for previous unload */
a22f0788
YR
7517 bnx2x_fw_command(bp,
7518 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7519
7520 /* unload UNDI on port 1 */
f2e0899f 7521 bp->pf_num = 1;
da5a662a 7522 bp->fw_seq =
f2e0899f 7523 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
7524 DRV_MSG_SEQ_NUMBER_MASK);
7525 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7526
a22f0788 7527 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7528 }
7529
b4661739
EG
7530 /* now it's safe to release the lock */
7531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7532
f2e0899f 7533 bnx2x_undi_int_disable(bp);
da5a662a
VZ
7534
7535 /* close input traffic and wait for it */
7536 /* Do not rcv packets to BRB */
7537 REG_WR(bp,
7538 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7539 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7540 /* Do not direct rcv packets that are not for MCP to
7541 * the BRB */
7542 REG_WR(bp,
7543 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7544 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7545 /* clear AEU */
7546 REG_WR(bp,
7547 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7548 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7549 msleep(10);
7550
7551 /* save NIG port swap info */
7552 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7553 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7554 /* reset device */
7555 REG_WR(bp,
7556 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7557 0xd3ffffff);
34f80b04
EG
7558 REG_WR(bp,
7559 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7560 0x1403);
da5a662a
VZ
7561 /* take the NIG out of reset and restore swap values */
7562 REG_WR(bp,
7563 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7564 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7565 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7566 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7567
7568 /* send unload done to the MCP */
a22f0788 7569 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7570
7571 /* restore our func and fw_seq */
f2e0899f 7572 bp->pf_num = orig_pf_num;
da5a662a 7573 bp->fw_seq =
f2e0899f 7574 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7575 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7576 } else
7577 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7578 }
7579}
7580
7581static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7582{
7583 u32 val, val2, val3, val4, id;
72ce58c3 7584 u16 pmc;
34f80b04
EG
7585
7586 /* Get the chip revision id and number. */
7587 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7588 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7589 id = ((val & 0xffff) << 16);
7590 val = REG_RD(bp, MISC_REG_CHIP_REV);
7591 id |= ((val & 0xf) << 12);
7592 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7593 id |= ((val & 0xff) << 4);
5a40e08e 7594 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7595 id |= (val & 0xf);
7596 bp->common.chip_id = id;
523224a3
DK
7597
7598 /* Set doorbell size */
7599 bp->db_size = (1 << BNX2X_DB_SHIFT);
7600
f2e0899f
DK
7601 if (CHIP_IS_E2(bp)) {
7602 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7603 if ((val & 1) == 0)
7604 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7605 else
7606 val = (val >> 1) & 1;
7607 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7608 "2_PORT_MODE");
7609 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7610 CHIP_2_PORT_MODE;
7611
7612 if (CHIP_MODE_IS_4_PORT(bp))
7613 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7614 else
7615 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7616 } else {
7617 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7618 bp->pfid = bp->pf_num; /* 0..7 */
7619 }
7620
523224a3
DK
7621 /*
7622 * set base FW non-default (fast path) status block id, this value is
7623 * used to initialize the fw_sb_id saved on the fp/queue structure to
7624 * determine the id used by the FW.
7625 */
f2e0899f
DK
7626 if (CHIP_IS_E1x(bp))
7627 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7628 else /* E2 */
7629 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7630
7631 bp->link_params.chip_id = bp->common.chip_id;
7632 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7633
1c06328c
EG
7634 val = (REG_RD(bp, 0x2874) & 0x55);
7635 if ((bp->common.chip_id & 0x1) ||
7636 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7637 bp->flags |= ONE_PORT_FLAG;
7638 BNX2X_DEV_INFO("single port device\n");
7639 }
7640
34f80b04
EG
7641 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7642 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7643 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7644 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7645 bp->common.flash_size, bp->common.flash_size);
7646
7647 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
f2e0899f
DK
7648 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7649 MISC_REG_GENERIC_CR_1 :
7650 MISC_REG_GENERIC_CR_0));
34f80b04 7651 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7652 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
7653 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7654 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 7655
f2e0899f 7656 if (!bp->common.shmem_base) {
34f80b04
EG
7657 BNX2X_DEV_INFO("MCP not active\n");
7658 bp->flags |= NO_MCP_FLAG;
7659 return;
7660 }
7661
7662 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7663 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7664 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f2e0899f 7665 BNX2X_ERR("BAD MCP validity signature\n");
34f80b04
EG
7666
7667 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7668 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7669
7670 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7671 SHARED_HW_CFG_LED_MODE_MASK) >>
7672 SHARED_HW_CFG_LED_MODE_SHIFT);
7673
c2c8b03e
EG
7674 bp->link_params.feature_config_flags = 0;
7675 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7676 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7677 bp->link_params.feature_config_flags |=
7678 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7679 else
7680 bp->link_params.feature_config_flags &=
7681 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7682
34f80b04
EG
7683 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7684 bp->common.bc_ver = val;
7685 BNX2X_DEV_INFO("bc_ver %X\n", val);
7686 if (val < BNX2X_BC_VER) {
7687 /* for now only warn
7688 * later we might need to enforce this */
f2e0899f
DK
7689 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7690 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7691 }
4d295db0 7692 bp->link_params.feature_config_flags |=
a22f0788 7693 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
7694 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7695
a22f0788
YR
7696 bp->link_params.feature_config_flags |=
7697 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7698 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3
EG
7699
7700 if (BP_E1HVN(bp) == 0) {
7701 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7702 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7703 } else {
7704 /* no WOL capability for E1HVN != 0 */
7705 bp->flags |= NO_WOL_FLAG;
7706 }
7707 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7708 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7709
7710 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7711 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7712 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7713 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7714
cdaa7cb8
VZ
7715 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7716 val, val2, val3, val4);
34f80b04
EG
7717}
7718
f2e0899f
DK
7719#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7720#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7721
7722static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7723{
7724 int pfid = BP_FUNC(bp);
7725 int vn = BP_E1HVN(bp);
7726 int igu_sb_id;
7727 u32 val;
7728 u8 fid;
7729
7730 bp->igu_base_sb = 0xff;
7731 bp->igu_sb_cnt = 0;
7732 if (CHIP_INT_MODE_IS_BC(bp)) {
7733 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7734 bp->l2_cid_count);
7735
7736 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7737 FP_SB_MAX_E1x;
7738
7739 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7740 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7741
7742 return;
7743 }
7744
7745 /* IGU in normal mode - read CAM */
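	/* Each valid CAM entry maps one IGU status block to a
	 * (function id, vector) pair; vector 0 is the function's
	 * default SB, all others are fastpath SBs. */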
7746 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7747 igu_sb_id++) {
7748 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7749 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7750 continue;
7751 fid = IGU_FID(val);
7752 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7753 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7754 continue;
7755 if (IGU_VEC(val) == 0)
7756 /* default status block */
7757 bp->igu_dsb_id = igu_sb_id;
7758 else {
7759 if (bp->igu_base_sb == 0xff)
7760 bp->igu_base_sb = igu_sb_id;
7761 bp->igu_sb_cnt++;
7762 }
7763 }
7764 }
7765 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7766 if (bp->igu_sb_cnt == 0)
7767 BNX2X_ERR("CAM configuration error\n");
7768}
7769
34f80b04
EG
7770static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7771 u32 switch_cfg)
a2fbb9ea 7772{
a22f0788
YR
7773 int cfg_size = 0, idx, port = BP_PORT(bp);
7774
7775 /* Aggregation of supported attributes of all external phys */
7776 bp->port.supported[0] = 0;
7777 bp->port.supported[1] = 0;
b7737c9b
YR
7778 switch (bp->link_params.num_phys) {
7779 case 1:
a22f0788
YR
7780 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7781 cfg_size = 1;
7782 break;
b7737c9b 7783 case 2:
a22f0788
YR
7784 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7785 cfg_size = 1;
7786 break;
7787 case 3:
7788 if (bp->link_params.multi_phy_config &
7789 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7790 bp->port.supported[1] =
7791 bp->link_params.phy[EXT_PHY1].supported;
7792 bp->port.supported[0] =
7793 bp->link_params.phy[EXT_PHY2].supported;
7794 } else {
7795 bp->port.supported[0] =
7796 bp->link_params.phy[EXT_PHY1].supported;
7797 bp->port.supported[1] =
7798 bp->link_params.phy[EXT_PHY2].supported;
7799 }
7800 cfg_size = 2;
7801 break;
b7737c9b 7802 }
a2fbb9ea 7803
a22f0788 7804 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 7805		BNX2X_ERR("NVRAM config error. BAD PHY config. "
a22f0788 7806 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 7807 SHMEM_RD(bp,
a22f0788
YR
7808 dev_info.port_hw_config[port].external_phy_config),
7809 SHMEM_RD(bp,
7810 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 7811 return;
f85582f8 7812 }
a2fbb9ea 7813
b7737c9b
YR
7814 switch (switch_cfg) {
7815 case SWITCH_CFG_1G:
34f80b04
EG
7816 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7817 port*0x10);
7818 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7819 break;
7820
7821 case SWITCH_CFG_10G:
34f80b04
EG
7822 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7823 port*0x18);
7824 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7825 break;
7826
7827 default:
7828 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 7829 bp->port.link_config[0]);
a2fbb9ea
ET
7830 return;
7831 }
a22f0788
YR
7832 /* mask what we support according to speed_cap_mask per configuration */
7833 for (idx = 0; idx < cfg_size; idx++) {
7834 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7835 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 7836 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7837
a22f0788 7838 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7839 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 7840 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7841
a22f0788 7842 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7843 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 7844 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7845
a22f0788 7846 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7847 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 7848 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7849
a22f0788 7850 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7851 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 7852 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 7853 SUPPORTED_1000baseT_Full);
a2fbb9ea 7854
a22f0788 7855 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7856 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 7857 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7858
a22f0788 7859 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7860 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
7861 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7862
7863 }
a2fbb9ea 7864
a22f0788
YR
7865 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7866 bp->port.supported[1]);
a2fbb9ea
ET
7867}
7868
34f80b04 7869static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7870{
a22f0788
YR
7871 u32 link_config, idx, cfg_size = 0;
7872 bp->port.advertising[0] = 0;
7873 bp->port.advertising[1] = 0;
7874 switch (bp->link_params.num_phys) {
7875 case 1:
7876 case 2:
7877 cfg_size = 1;
7878 break;
7879 case 3:
7880 cfg_size = 2;
7881 break;
7882 }
7883 for (idx = 0; idx < cfg_size; idx++) {
7884 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7885 link_config = bp->port.link_config[idx];
7886 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 7887 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
7888 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7889 bp->link_params.req_line_speed[idx] =
7890 SPEED_AUTO_NEG;
7891 bp->port.advertising[idx] |=
7892 bp->port.supported[idx];
f85582f8
DK
7893 } else {
7894 /* force 10G, no AN */
a22f0788
YR
7895 bp->link_params.req_line_speed[idx] =
7896 SPEED_10000;
7897 bp->port.advertising[idx] |=
7898 (ADVERTISED_10000baseT_Full |
f85582f8 7899 ADVERTISED_FIBRE);
a22f0788 7900 continue;
f85582f8
DK
7901 }
7902 break;
a2fbb9ea 7903
f85582f8 7904 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
7905 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7906 bp->link_params.req_line_speed[idx] =
7907 SPEED_10;
7908 bp->port.advertising[idx] |=
7909 (ADVERTISED_10baseT_Full |
f85582f8
DK
7910 ADVERTISED_TP);
7911 } else {
7912 BNX2X_ERROR("NVRAM config error. "
7913 "Invalid link_config 0x%x"
7914 " speed_cap_mask 0x%x\n",
7915 link_config,
a22f0788 7916 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7917 return;
7918 }
7919 break;
a2fbb9ea 7920
f85582f8 7921 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
7922 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7923 bp->link_params.req_line_speed[idx] =
7924 SPEED_10;
7925 bp->link_params.req_duplex[idx] =
7926 DUPLEX_HALF;
7927 bp->port.advertising[idx] |=
7928 (ADVERTISED_10baseT_Half |
f85582f8
DK
7929 ADVERTISED_TP);
7930 } else {
7931 BNX2X_ERROR("NVRAM config error. "
7932 "Invalid link_config 0x%x"
7933 " speed_cap_mask 0x%x\n",
7934 link_config,
7935 bp->link_params.speed_cap_mask[idx]);
7936 return;
7937 }
7938 break;
a2fbb9ea 7939
f85582f8
DK
7940 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7941 if (bp->port.supported[idx] &
7942 SUPPORTED_100baseT_Full) {
a22f0788
YR
7943 bp->link_params.req_line_speed[idx] =
7944 SPEED_100;
7945 bp->port.advertising[idx] |=
7946 (ADVERTISED_100baseT_Full |
f85582f8
DK
7947 ADVERTISED_TP);
7948 } else {
7949 BNX2X_ERROR("NVRAM config error. "
7950 "Invalid link_config 0x%x"
7951 " speed_cap_mask 0x%x\n",
7952 link_config,
7953 bp->link_params.speed_cap_mask[idx]);
7954 return;
7955 }
7956 break;
a2fbb9ea 7957
f85582f8
DK
7958 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7959 if (bp->port.supported[idx] &
7960 SUPPORTED_100baseT_Half) {
7961 bp->link_params.req_line_speed[idx] =
7962 SPEED_100;
7963 bp->link_params.req_duplex[idx] =
7964 DUPLEX_HALF;
a22f0788
YR
7965 bp->port.advertising[idx] |=
7966 (ADVERTISED_100baseT_Half |
f85582f8
DK
7967 ADVERTISED_TP);
7968 } else {
7969 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7970 "Invalid link_config 0x%x"
7971 " speed_cap_mask 0x%x\n",
a22f0788
YR
7972 link_config,
7973 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7974 return;
7975 }
7976 break;
a2fbb9ea 7977
f85582f8 7978 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
7979 if (bp->port.supported[idx] &
7980 SUPPORTED_1000baseT_Full) {
7981 bp->link_params.req_line_speed[idx] =
7982 SPEED_1000;
7983 bp->port.advertising[idx] |=
7984 (ADVERTISED_1000baseT_Full |
f85582f8
DK
7985 ADVERTISED_TP);
7986 } else {
7987 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7988 "Invalid link_config 0x%x"
7989 " speed_cap_mask 0x%x\n",
a22f0788
YR
7990 link_config,
7991 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7992 return;
7993 }
7994 break;
a2fbb9ea 7995
f85582f8 7996 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
7997 if (bp->port.supported[idx] &
7998 SUPPORTED_2500baseX_Full) {
7999 bp->link_params.req_line_speed[idx] =
8000 SPEED_2500;
8001 bp->port.advertising[idx] |=
8002 (ADVERTISED_2500baseX_Full |
34f80b04 8003 ADVERTISED_TP);
f85582f8
DK
8004 } else {
8005 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8006 "Invalid link_config 0x%x"
8007 " speed_cap_mask 0x%x\n",
a22f0788 8008 link_config,
f85582f8
DK
8009 bp->link_params.speed_cap_mask[idx]);
8010 return;
8011 }
8012 break;
a2fbb9ea 8013
f85582f8
DK
8014 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8015 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8016 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
8017 if (bp->port.supported[idx] &
8018 SUPPORTED_10000baseT_Full) {
8019 bp->link_params.req_line_speed[idx] =
8020 SPEED_10000;
8021 bp->port.advertising[idx] |=
8022 (ADVERTISED_10000baseT_Full |
34f80b04 8023 ADVERTISED_FIBRE);
f85582f8
DK
8024 } else {
8025 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8026 "Invalid link_config 0x%x"
8027 " speed_cap_mask 0x%x\n",
a22f0788 8028 link_config,
f85582f8
DK
8029 bp->link_params.speed_cap_mask[idx]);
8030 return;
8031 }
8032 break;
a2fbb9ea 8033
f85582f8
DK
8034 default:
8035 BNX2X_ERROR("NVRAM config error. "
8036 "BAD link speed link_config 0x%x\n",
8037 link_config);
8038 bp->link_params.req_line_speed[idx] =
8039 SPEED_AUTO_NEG;
8040 bp->port.advertising[idx] =
8041 bp->port.supported[idx];
8042 break;
8043 }
a2fbb9ea 8044
a22f0788 8045 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8046 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
8047 if ((bp->link_params.req_flow_ctrl[idx] ==
8048 BNX2X_FLOW_CTRL_AUTO) &&
8049 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8050 bp->link_params.req_flow_ctrl[idx] =
8051 BNX2X_FLOW_CTRL_NONE;
8052 }
a2fbb9ea 8053
a22f0788
YR
8054 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8055 " 0x%x advertising 0x%x\n",
8056 bp->link_params.req_line_speed[idx],
8057 bp->link_params.req_duplex[idx],
8058 bp->link_params.req_flow_ctrl[idx],
8059 bp->port.advertising[idx]);
8060 }
a2fbb9ea
ET
8061}
8062
e665bfda
MC
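/* Pack the 16-bit upper and 32-bit lower MAC words, as read from
 * shmem, into a 6-byte network-order MAC address buffer */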
8063static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8064{
8065 mac_hi = cpu_to_be16(mac_hi);
8066 mac_lo = cpu_to_be32(mac_lo);
8067 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8068 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8069}
8070
34f80b04 8071static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8072{
34f80b04
EG
8073 int port = BP_PORT(bp);
8074 u32 val, val2;
589abe3a 8075 u32 config;
b7737c9b 8076	u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8077
c18487ee 8078 bp->link_params.bp = bp;
34f80b04 8079 bp->link_params.port = port;
c18487ee 8080
c18487ee 8081 bp->link_params.lane_config =
a2fbb9ea 8082 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8083
a22f0788 8084 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
8085 SHMEM_RD(bp,
8086 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
8087 bp->link_params.speed_cap_mask[1] =
8088 SHMEM_RD(bp,
8089 dev_info.port_hw_config[port].speed_capability_mask2);
8090 bp->port.link_config[0] =
a2fbb9ea
ET
8091 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8092
a22f0788
YR
8093 bp->port.link_config[1] =
8094 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8095
a22f0788
YR
8096 bp->link_params.multi_phy_config =
8097 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
8098 /* If the device is capable of WoL, set the default state according
8099 * to the HW
8100 */
4d295db0 8101 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8102 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8103 (config & PORT_FEATURE_WOL_ENABLED));
8104
f85582f8 8105 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8106 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8107 bp->link_params.lane_config,
a22f0788
YR
8108 bp->link_params.speed_cap_mask[0],
8109 bp->port.link_config[0]);
a2fbb9ea 8110
a22f0788 8111 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8112 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8113 bnx2x_phy_probe(&bp->link_params);
c18487ee 8114 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8115
8116 bnx2x_link_settings_requested(bp);
8117
01cd4528
EG
8118 /*
8119 * If connected directly, work with the internal PHY, otherwise, work
8120 * with the external PHY
8121 */
b7737c9b
YR
8122 ext_phy_config =
8123 SHMEM_RD(bp,
8124 dev_info.port_hw_config[port].external_phy_config);
8125 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8126 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8127 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
8128
8129 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8130 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8131 bp->mdio.prtad =
b7737c9b 8132 XGXS_EXT_PHY_ADDR(ext_phy_config);
01cd4528 8133
a2fbb9ea
ET
8134 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8135 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8136 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8137 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8138 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
8139
8140#ifdef BCM_CNIC
8141 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8142 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8143 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8144#endif
34f80b04
EG
8145}
8146
8147static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8148{
f2e0899f
DK
8149 int func = BP_ABS_FUNC(bp);
8150 int vn;
34f80b04
EG
8151 u32 val, val2;
8152 int rc = 0;
a2fbb9ea 8153
34f80b04 8154 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8155
f2e0899f
DK
8156 if (CHIP_IS_E1x(bp)) {
8157 bp->common.int_block = INT_BLOCK_HC;
8158
8159 bp->igu_dsb_id = DEF_SB_IGU_ID;
8160 bp->igu_base_sb = 0;
8161 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8162 } else {
8163 bp->common.int_block = INT_BLOCK_IGU;
8164 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8165 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8166 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8167 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8168 } else
8169 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8170
f2e0899f
DK
8171 bnx2x_get_igu_cam_info(bp);
8172
8173 }
8174 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8175 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8176
8177 /*
8178 * Initialize MF configuration
8179 */
523224a3 8180
fb3bff17
DK
8181 bp->mf_ov = 0;
8182 bp->mf_mode = 0;
f2e0899f
DK
8183 vn = BP_E1HVN(bp);
8184 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8185 if (SHMEM2_HAS(bp, mf_cfg_addr))
8186 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8187 else
8188 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8189 offsetof(struct shmem_region, func_mb) +
8190 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
f2e0899f 8191 bp->mf_config[vn] =
523224a3 8192 MF_CFG_RD(bp, func_mf_config[func].config);
a2fbb9ea 8193
523224a3 8194 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8195 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8196 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
fb3bff17 8197 bp->mf_mode = 1;
2691d51d 8198 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8199 IS_MF(bp) ? "multi" : "single");
2691d51d 8200
fb3bff17 8201 if (IS_MF(bp)) {
523224a3 8202 val = (MF_CFG_RD(bp, func_mf_config[func].
2691d51d
EG
8203 e1hov_tag) &
8204 FUNC_MF_CFG_E1HOV_TAG_MASK);
8205 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8206 bp->mf_ov = val;
f2e0899f 8207 BNX2X_DEV_INFO("MF OV for func %d is %d "
2691d51d 8208 "(0x%04x)\n",
fb3bff17 8209 func, bp->mf_ov, bp->mf_ov);
2691d51d 8210 } else {
f2e0899f 8211 BNX2X_ERROR("No valid MF OV for func %d,"
cdaa7cb8 8212 " aborting\n", func);
34f80b04
EG
8213 rc = -EPERM;
8214 }
2691d51d 8215 } else {
f2e0899f 8216 if (BP_VN(bp)) {
cdaa7cb8
VZ
8217 BNX2X_ERROR("VN %d in single function mode,"
8218 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
8219 rc = -EPERM;
8220 }
34f80b04
EG
8221 }
8222 }
a2fbb9ea 8223
f2e0899f
DK
8224 /* adjust igu_sb_cnt to MF for E1x */
8225 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8226 bp->igu_sb_cnt /= E1HVN_MAX;
8227
f2e0899f
DK
8228 /*
8229	 * adjust E2 sb count: to be removed when the FW supports
8230	 * more than 16 L2 clients
8231 */
8232#define MAX_L2_CLIENTS 16
8233 if (CHIP_IS_E2(bp))
8234 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8235 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8236
34f80b04
EG
8237 if (!BP_NOMCP(bp)) {
8238 bnx2x_get_port_hwinfo(bp);
8239
f2e0899f
DK
8240 bp->fw_seq =
8241 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8242 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8243 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8244 }
8245
fb3bff17 8246 if (IS_MF(bp)) {
523224a3
DK
8247 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8248 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
34f80b04
EG
8249 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8250 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8251 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8252 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8253 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8254 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8255 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8256 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8257 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8258 ETH_ALEN);
8259 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8260 ETH_ALEN);
a2fbb9ea 8261 }
34f80b04
EG
8262
8263 return rc;
a2fbb9ea
ET
8264 }
8265
34f80b04
EG
8266 if (BP_NOMCP(bp)) {
8267 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 8268 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
8269 random_ether_addr(bp->dev->dev_addr);
8270 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8271 }
a2fbb9ea 8272
34f80b04
EG
8273 return rc;
8274}
8275
34f24c7f
VZ
8276static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8277{
8278 int cnt, i, block_end, rodi;
8279 char vpd_data[BNX2X_VPD_LEN+1];
8280 char str_id_reg[VENDOR_ID_LEN+1];
8281 char str_id_cap[VENDOR_ID_LEN+1];
8282 u8 len;
8283
8284 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8285 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8286
8287 if (cnt < BNX2X_VPD_LEN)
8288 goto out_not_found;
8289
8290 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8291 PCI_VPD_LRDT_RO_DATA);
8292 if (i < 0)
8293 goto out_not_found;
8294
8295
8296 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8297 pci_vpd_lrdt_size(&vpd_data[i]);
8298
8299 i += PCI_VPD_LRDT_TAG_SIZE;
8300
8301 if (block_end > BNX2X_VPD_LEN)
8302 goto out_not_found;
8303
8304 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8305 PCI_VPD_RO_KEYWORD_MFR_ID);
8306 if (rodi < 0)
8307 goto out_not_found;
8308
8309 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8310
8311 if (len != VENDOR_ID_LEN)
8312 goto out_not_found;
8313
8314 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8315
8316 /* vendor specific info */
8317 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8318 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8319 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8320 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8321
8322 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8323 PCI_VPD_RO_KEYWORD_VENDOR0);
8324 if (rodi >= 0) {
8325 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8326
8327 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8328
8329 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8330 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8331 bp->fw_ver[len] = ' ';
8332 }
8333 }
8334 return;
8335 }
8336out_not_found:
8337 return;
8338}
8339
34f80b04
EG
8340static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8341{
f2e0899f 8342 int func;
87942b46 8343 int timer_interval;
34f80b04
EG
8344 int rc;
8345
da5a662a
VZ
8346 /* Disable interrupt handling until HW is initialized */
8347 atomic_set(&bp->intr_sem, 1);
e1510706 8348 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8349
34f80b04 8350 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8351 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8352 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
8353#ifdef BCM_CNIC
8354 mutex_init(&bp->cnic_mutex);
8355#endif
a2fbb9ea 8356
1cf167f2 8357 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8358 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
8359
8360 rc = bnx2x_get_hwinfo(bp);
8361
523224a3
DK
8362 if (!rc)
8363 rc = bnx2x_alloc_mem_bp(bp);
8364
34f24c7f 8365 bnx2x_read_fwinfo(bp);
f2e0899f
DK
8366
8367 func = BP_FUNC(bp);
8368
34f80b04
EG
8369 /* need to reset chip if undi was active */
8370 if (!BP_NOMCP(bp))
8371 bnx2x_undi_unload(bp);
8372
8373 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8374 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
8375
8376 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
8377 dev_err(&bp->pdev->dev, "MCP disabled, "
8378 "must load devices in order!\n");
34f80b04 8379
555f6c78 8380 /* Set multi queue mode */
8badd27a
EG
8381 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8382 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
8383 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8384 "requested is not MSI-X\n");
555f6c78
EG
8385 multi_mode = ETH_RSS_MODE_DISABLED;
8386 }
8387 bp->multi_mode = multi_mode;
5d7cd496 8388 bp->int_mode = int_mode;
555f6c78 8389
4fd89b7a
DK
8390 bp->dev->features |= NETIF_F_GRO;
8391
7a9b2557
VZ
8392 /* Set TPA flags */
8393 if (disable_tpa) {
8394 bp->flags &= ~TPA_ENABLE_FLAG;
8395 bp->dev->features &= ~NETIF_F_LRO;
8396 } else {
8397 bp->flags |= TPA_ENABLE_FLAG;
8398 bp->dev->features |= NETIF_F_LRO;
8399 }
5d7cd496 8400 bp->disable_tpa = disable_tpa;
7a9b2557 8401
a18f5128
EG
8402 if (CHIP_IS_E1(bp))
8403 bp->dropless_fc = 0;
8404 else
8405 bp->dropless_fc = dropless_fc;
8406
8d5726c4 8407 bp->mrrs = mrrs;
7a9b2557 8408
34f80b04 8409 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04
EG
8410
8411 bp->rx_csum = 1;
34f80b04 8412
7d323bfd 8413 /* make sure that the numbers are in the right granularity */
523224a3
DK
8414 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8415 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8416
87942b46
EG
8417 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8418 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8419
8420 init_timer(&bp->timer);
8421 bp->timer.expires = jiffies + bp->current_interval;
8422 bp->timer.data = (unsigned long) bp;
8423 bp->timer.function = bnx2x_timer;
8424
8425 return rc;
a2fbb9ea
ET
8426}
8427
a2fbb9ea 8428
de0c62db
DK
8429/****************************************************************************
8430* General service functions
8431****************************************************************************/
a2fbb9ea 8432
bb2a0f7a 8433/* called with rtnl_lock */
a2fbb9ea
ET
8434static int bnx2x_open(struct net_device *dev)
8435{
8436 struct bnx2x *bp = netdev_priv(dev);
8437
6eccabb3
EG
8438 netif_carrier_off(dev);
8439
a2fbb9ea
ET
8440 bnx2x_set_power_state(bp, PCI_D0);
8441
72fd0718
VZ
8442 if (!bnx2x_reset_is_done(bp)) {
8443 do {
8444			/* Reset the MCP mailbox sequence if there is an ongoing
8445			 * recovery
8446 */
8447 bp->fw_seq = 0;
8448
8449			/* If this is the first function to load and "reset done"
8450			 * is still not set, a recovery may still be pending. We
8451			 * don't check the attention state here because it may have
8452			 * already been cleared by a "common" reset, but we shall
8453			 * proceed with "process kill" anyway.
8454 */
8455 if ((bnx2x_get_load_cnt(bp) == 0) &&
8456 bnx2x_trylock_hw_lock(bp,
8457 HW_LOCK_RESOURCE_RESERVED_08) &&
8458 (!bnx2x_leader_reset(bp))) {
8459 DP(NETIF_MSG_HW, "Recovered in open\n");
8460 break;
8461 }
8462
8463 bnx2x_set_power_state(bp, PCI_D3hot);
8464
8465 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
8466			" completed yet. Try again later. If you still see this"
8467 " message after a few retries then power cycle is"
8468 " required.\n", bp->dev->name);
8469
8470 return -EAGAIN;
8471 } while (0);
8472 }
8473
8474 bp->recovery_state = BNX2X_RECOVERY_DONE;
8475
bb2a0f7a 8476 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
8477}
8478
bb2a0f7a 8479/* called with rtnl_lock */
a2fbb9ea
ET
8480static int bnx2x_close(struct net_device *dev)
8481{
a2fbb9ea
ET
8482 struct bnx2x *bp = netdev_priv(dev);
8483
8484 /* Unload the driver, release IRQs */
bb2a0f7a 8485 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8486 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
8487
8488 return 0;
8489}
8490
f5372251 8491/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8492void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
8493{
8494 struct bnx2x *bp = netdev_priv(dev);
8495 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8496 int port = BP_PORT(bp);
8497
8498 if (bp->state != BNX2X_STATE_OPEN) {
8499 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8500 return;
8501 }
8502
8503 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8504
8505 if (dev->flags & IFF_PROMISC)
8506 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04 8507 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
8508 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8509 CHIP_IS_E1(bp)))
34f80b04 8510 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04
EG
8511 else { /* some multicasts */
8512 if (CHIP_IS_E1(bp)) {
523224a3
DK
8513 /*
 8514			 * set the mc list; do not wait, as waiting implies
 8515			 * sleeping and set_rx_mode can be invoked from a
 8516			 * non-sleepable context
8517 */
8518 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8519 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8520 BNX2X_MAX_MULTICAST*(1 + port));
e665bfda 8521
523224a3 8522 bnx2x_set_e1_mc_list(bp, offset);
34f80b04
EG
8523 } else { /* E1H */
8524 /* Accept one or more multicasts */
22bedad3 8525 struct netdev_hw_addr *ha;
34f80b04
EG
8526 u32 mc_filter[MC_HASH_SIZE];
8527 u32 crc, bit, regidx;
8528 int i;
8529
8530 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8531
22bedad3 8532 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 8533 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
523224a3 8534 bnx2x_mc_addr(ha));
34f80b04 8535
523224a3
DK
8536 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8537 ETH_ALEN);
34f80b04
EG
8538 bit = (crc >> 24) & 0xff;
8539 regidx = bit >> 5;
8540 bit &= 0x1f;
8541 mc_filter[regidx] |= (1 << bit);
8542 }
8543
8544 for (i = 0; i < MC_HASH_SIZE; i++)
8545 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8546 mc_filter[i]);
8547 }
8548 }
8549
8550 bp->rx_mode = rx_mode;
8551 bnx2x_set_storm_rx_mode(bp);
8552}
8553
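/*
 * The E1H branch of bnx2x_set_rx_mode() above spreads multicast MACs over
 * MC_HASH_SIZE (8) 32-bit registers, i.e. 256 hash bits: the top byte of
 * the little-endian CRC32C picks the hash bit, its upper three bits select
 * the register and its lower five bits the bit within it.  A minimal
 * standalone sketch of the same indexing math (user space; crc32c_le() is
 * assumed to be available, e.g. from libcrc32c):
 *
 *	static void mc_hash_set(u32 *mc_filter, const u8 mac[6])
 *	{
 *		u32 crc = crc32c_le(0, mac, 6);		// 6 == ETH_ALEN
 *		u32 hbit = (crc >> 24) & 0xff;		// bucket 0..255
 *
 *		mc_filter[hbit >> 5] |= 1 << (hbit & 0x1f);
 *	}
 */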
c18487ee 8554/* called with rtnl_lock */
01cd4528
EG
8555static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8556 int devad, u16 addr)
a2fbb9ea 8557{
01cd4528
EG
8558 struct bnx2x *bp = netdev_priv(netdev);
8559 u16 value;
8560 int rc;
a2fbb9ea 8561
01cd4528
EG
8562 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8563 prtad, devad, addr);
a2fbb9ea 8564
01cd4528
EG
8565 /* The HW expects different devad if CL22 is used */
8566 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8567
01cd4528 8568 bnx2x_acquire_phy_lock(bp);
e10bc84d 8569 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
8570 bnx2x_release_phy_lock(bp);
8571 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8572
01cd4528
EG
8573 if (!rc)
8574 rc = value;
8575 return rc;
8576}
a2fbb9ea 8577
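/*
 * The rc/value fold at the end of bnx2x_mdio_read() matches the mdio45
 * contract: a read returns the 16-bit register value (>= 0) on success or
 * a negative errno, so mdio_mii_ioctl() can pass the result straight back
 * to user space.
 */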
01cd4528
EG
8578/* called with rtnl_lock */
8579static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8580 u16 addr, u16 value)
8581{
8582 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
8583 int rc;
8584
8585 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8586 " value 0x%x\n", prtad, devad, addr, value);
8587
01cd4528
EG
8588 /* The HW expects different devad if CL22 is used */
8589 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8590
01cd4528 8591 bnx2x_acquire_phy_lock(bp);
e10bc84d 8592 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
8593 bnx2x_release_phy_lock(bp);
8594 return rc;
8595}
c18487ee 8596
01cd4528
EG
8597/* called with rtnl_lock */
8598static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8599{
8600 struct bnx2x *bp = netdev_priv(dev);
8601 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 8602
01cd4528
EG
8603 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8604 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 8605
01cd4528
EG
8606 if (!netif_running(dev))
8607 return -EAGAIN;
8608
8609 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
8610}
8611
257ddbda 8612#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
8613static void poll_bnx2x(struct net_device *dev)
8614{
8615 struct bnx2x *bp = netdev_priv(dev);
8616
8617 disable_irq(bp->pdev->irq);
8618 bnx2x_interrupt(bp->pdev->irq, dev);
8619 enable_irq(bp->pdev->irq);
8620}
8621#endif
8622
c64213cd
SH
8623static const struct net_device_ops bnx2x_netdev_ops = {
8624 .ndo_open = bnx2x_open,
8625 .ndo_stop = bnx2x_close,
8626 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 8627 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
8628 .ndo_set_mac_address = bnx2x_change_mac_addr,
8629 .ndo_validate_addr = eth_validate_addr,
8630 .ndo_do_ioctl = bnx2x_ioctl,
8631 .ndo_change_mtu = bnx2x_change_mtu,
8632 .ndo_tx_timeout = bnx2x_tx_timeout,
8633#ifdef BCM_VLAN
8634 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
8635#endif
257ddbda 8636#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
8637 .ndo_poll_controller = poll_bnx2x,
8638#endif
8639};
8640
34f80b04
EG
8641static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8642 struct net_device *dev)
a2fbb9ea
ET
8643{
8644 struct bnx2x *bp;
8645 int rc;
8646
8647 SET_NETDEV_DEV(dev, &pdev->dev);
8648 bp = netdev_priv(dev);
8649
34f80b04
EG
8650 bp->dev = dev;
8651 bp->pdev = pdev;
a2fbb9ea 8652 bp->flags = 0;
f2e0899f 8653 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
8654
8655 rc = pci_enable_device(pdev);
8656 if (rc) {
cdaa7cb8
VZ
8657 dev_err(&bp->pdev->dev,
8658 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
8659 goto err_out;
8660 }
8661
8662 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
8663 dev_err(&bp->pdev->dev,
8664 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
8665 rc = -ENODEV;
8666 goto err_out_disable;
8667 }
8668
8669 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
8670 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8671 " base address, aborting\n");
a2fbb9ea
ET
8672 rc = -ENODEV;
8673 goto err_out_disable;
8674 }
8675
34f80b04
EG
8676 if (atomic_read(&pdev->enable_cnt) == 1) {
8677 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8678 if (rc) {
cdaa7cb8
VZ
8679 dev_err(&bp->pdev->dev,
8680 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
8681 goto err_out_disable;
8682 }
a2fbb9ea 8683
34f80b04
EG
8684 pci_set_master(pdev);
8685 pci_save_state(pdev);
8686 }
a2fbb9ea
ET
8687
8688 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8689 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
8690 dev_err(&bp->pdev->dev,
8691 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
8692 rc = -EIO;
8693 goto err_out_release;
8694 }
8695
8696 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8697 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
8698 dev_err(&bp->pdev->dev,
8699 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
8700 rc = -EIO;
8701 goto err_out_release;
8702 }
8703
1a983142 8704 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 8705 bp->flags |= USING_DAC_FLAG;
1a983142 8706 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
8707 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8708 " failed, aborting\n");
a2fbb9ea
ET
8709 rc = -EIO;
8710 goto err_out_release;
8711 }
8712
1a983142 8713 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
8714 dev_err(&bp->pdev->dev,
8715 "System does not support DMA, aborting\n");
a2fbb9ea
ET
8716 rc = -EIO;
8717 goto err_out_release;
8718 }
8719
34f80b04
EG
8720 dev->mem_start = pci_resource_start(pdev, 0);
8721 dev->base_addr = dev->mem_start;
8722 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
8723
8724 dev->irq = pdev->irq;
8725
275f165f 8726 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 8727 if (!bp->regview) {
cdaa7cb8
VZ
8728 dev_err(&bp->pdev->dev,
8729 "Cannot map register space, aborting\n");
a2fbb9ea
ET
8730 rc = -ENOMEM;
8731 goto err_out_release;
8732 }
8733
34f80b04 8734 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 8735 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 8736 pci_resource_len(pdev, 2)));
a2fbb9ea 8737 if (!bp->doorbells) {
cdaa7cb8
VZ
8738 dev_err(&bp->pdev->dev,
8739 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
8740 rc = -ENOMEM;
8741 goto err_out_unmap;
8742 }
8743
8744 bnx2x_set_power_state(bp, PCI_D0);
8745
34f80b04
EG
8746 /* clean indirect addresses */
8747 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8748 PCICFG_VENDOR_ID_OFFSET);
8749 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8750 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8751 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8752 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 8753
72fd0718
VZ
8754 /* Reset the load counter */
8755 bnx2x_clear_load_cnt(bp);
8756
34f80b04 8757 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 8758
c64213cd 8759 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 8760 bnx2x_set_ethtool_ops(dev);
34f80b04
EG
8761 dev->features |= NETIF_F_SG;
8762 dev->features |= NETIF_F_HW_CSUM;
8763 if (bp->flags & USING_DAC_FLAG)
8764 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
8765 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8766 dev->features |= NETIF_F_TSO6;
34f80b04
EG
8767#ifdef BCM_VLAN
8768 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 8769 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
8770
8771 dev->vlan_features |= NETIF_F_SG;
8772 dev->vlan_features |= NETIF_F_HW_CSUM;
8773 if (bp->flags & USING_DAC_FLAG)
8774 dev->vlan_features |= NETIF_F_HIGHDMA;
8775 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8776 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 8777#endif
a2fbb9ea 8778
01cd4528
EG
8779 /* get_port_hwinfo() will set prtad and mmds properly */
8780 bp->mdio.prtad = MDIO_PRTAD_NONE;
8781 bp->mdio.mmds = 0;
8782 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8783 bp->mdio.dev = dev;
8784 bp->mdio.mdio_read = bnx2x_mdio_read;
8785 bp->mdio.mdio_write = bnx2x_mdio_write;
8786
a2fbb9ea
ET
8787 return 0;
8788
8789err_out_unmap:
8790 if (bp->regview) {
8791 iounmap(bp->regview);
8792 bp->regview = NULL;
8793 }
a2fbb9ea
ET
8794 if (bp->doorbells) {
8795 iounmap(bp->doorbells);
8796 bp->doorbells = NULL;
8797 }
8798
8799err_out_release:
34f80b04
EG
8800 if (atomic_read(&pdev->enable_cnt) == 1)
8801 pci_release_regions(pdev);
a2fbb9ea
ET
8802
8803err_out_disable:
8804 pci_disable_device(pdev);
8805 pci_set_drvdata(pdev, NULL);
8806
8807err_out:
8808 return rc;
8809}
8810
37f9ce62
EG
8811static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8812 int *width, int *speed)
25047950
ET
8813{
8814 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8815
37f9ce62 8816 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 8817
37f9ce62
EG
 8818	/* return value: 1 = 2.5GHz, 2 = 5GHz */
8819 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 8820}
37f9ce62 8821
6891dd25 8822static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 8823{
37f9ce62 8824 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
8825 struct bnx2x_fw_file_hdr *fw_hdr;
8826 struct bnx2x_fw_file_section *sections;
94a78b79 8827 u32 offset, len, num_ops;
37f9ce62 8828 u16 *ops_offsets;
94a78b79 8829 int i;
37f9ce62 8830 const u8 *fw_ver;
94a78b79
VZ
8831
8832 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8833 return -EINVAL;
8834
8835 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8836 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8837
8838 /* Make sure none of the offsets and sizes make us read beyond
8839 * the end of the firmware data */
8840 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8841 offset = be32_to_cpu(sections[i].offset);
8842 len = be32_to_cpu(sections[i].len);
8843 if (offset + len > firmware->size) {
cdaa7cb8
VZ
8844 dev_err(&bp->pdev->dev,
8845 "Section %d length is out of bounds\n", i);
94a78b79
VZ
8846 return -EINVAL;
8847 }
8848 }
8849
8850 /* Likewise for the init_ops offsets */
8851 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8852 ops_offsets = (u16 *)(firmware->data + offset);
8853 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8854
8855 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8856 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
8857 dev_err(&bp->pdev->dev,
8858 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
8859 return -EINVAL;
8860 }
8861 }
8862
8863 /* Check FW version */
8864 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8865 fw_ver = firmware->data + offset;
8866 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8867 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8868 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8869 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
8870 dev_err(&bp->pdev->dev,
8871 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
8872 fw_ver[0], fw_ver[1], fw_ver[2],
8873 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8874 BCM_5710_FW_MINOR_VERSION,
8875 BCM_5710_FW_REVISION_VERSION,
8876 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 8877 return -EINVAL;
94a78b79
VZ
8878 }
8879
8880 return 0;
8881}
8882
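/*
 * The per-section check in bnx2x_check_firmware() reduces to "does
 * [offset, offset + len) fit inside the blob".  A freestanding sketch of
 * the same predicate, with the sum widened to u64 as a defensive variation
 * so that a hostile offset/len pair cannot wrap a 32-bit addition:
 *
 *	static bool section_in_bounds(u32 offset, u32 len, size_t fw_size)
 *	{
 *		return (u64)offset + len <= fw_size;
 *	}
 */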
ab6ad5a4 8883static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 8884{
ab6ad5a4
EG
8885 const __be32 *source = (const __be32 *)_source;
8886 u32 *target = (u32 *)_target;
94a78b79 8887 u32 i;
94a78b79
VZ
8888
8889 for (i = 0; i < n/4; i++)
8890 target[i] = be32_to_cpu(source[i]);
8891}
8892
8893/*
8894 Ops array is stored in the following format:
8895 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8896 */
ab6ad5a4 8897static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 8898{
ab6ad5a4
EG
8899 const __be32 *source = (const __be32 *)_source;
8900 struct raw_op *target = (struct raw_op *)_target;
94a78b79 8901 u32 i, j, tmp;
94a78b79 8902
ab6ad5a4 8903 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
8904 tmp = be32_to_cpu(source[j]);
8905 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
8906 target[i].offset = tmp & 0xffffff;
8907 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
8908 }
8909}
ab6ad5a4 8910
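/*
 * Worked example for the record layout handled by bnx2x_prep_ops() above:
 * the eight big-endian bytes 05 00 12 34 00 00 00 01 decode to
 * op = 0x05, offset = 0x001234 and raw_data = 0x00000001.
 */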
523224a3
DK
8911/**
8912 * IRO array is stored in the following format:
8913 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8914 */
8915static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8916{
8917 const __be32 *source = (const __be32 *)_source;
8918 struct iro *target = (struct iro *)_target;
8919 u32 i, j, tmp;
8920
8921 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8922 target[i].base = be32_to_cpu(source[j]);
8923 j++;
8924 tmp = be32_to_cpu(source[j]);
8925 target[i].m1 = (tmp >> 16) & 0xffff;
8926 target[i].m2 = tmp & 0xffff;
8927 j++;
8928 tmp = be32_to_cpu(source[j]);
8929 target[i].m3 = (tmp >> 16) & 0xffff;
8930 target[i].size = tmp & 0xffff;
8931 j++;
8932 }
8933}
8934
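/*
 * Worked example for bnx2x_prep_iro() above: each IRO record is three
 * big-endian 32-bit words, base | (m1 << 16 | m2) | (m3 << 16 | size).
 * The words 0x00003000 0x00400008 0x00100010 therefore decode to
 * base = 0x3000, m1 = 0x40, m2 = 0x8, m3 = 0x10, size = 0x10.
 */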
ab6ad5a4 8935static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 8936{
ab6ad5a4
EG
8937 const __be16 *source = (const __be16 *)_source;
8938 u16 *target = (u16 *)_target;
94a78b79 8939 u32 i;
94a78b79
VZ
8940
8941 for (i = 0; i < n/2; i++)
8942 target[i] = be16_to_cpu(source[i]);
8943}
8944
7995c64e
JP
8945#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8946do { \
8947 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8948 bp->arr = kmalloc(len, GFP_KERNEL); \
8949 if (!bp->arr) { \
8950 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8951 goto lbl; \
8952 } \
8953 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8954 (u8 *)bp->arr, len); \
8955} while (0)
94a78b79 8956
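/*
 * BNX2X_ALLOC_AND_SET() is wrapped in do { ... } while (0) so that it
 * expands to a single statement (safe under an un-braced if), and its
 * label argument routes allocation failures into the caller's unwind
 * chain.  For instance, BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err,
 * bnx2x_prep_ops) expands to roughly:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_ops.len);
 *	bp->init_ops = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_ops)
 *		goto init_ops_alloc_err;	// after the pr_err()
 *	bnx2x_prep_ops(bp->firmware->data +
 *		       be32_to_cpu(fw_hdr->init_ops.offset),
 *		       (u8 *)bp->init_ops, len);
 */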
6891dd25 8957int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 8958{
45229b42 8959 const char *fw_file_name;
94a78b79 8960 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 8961 int rc;
94a78b79 8962
94a78b79 8963 if (CHIP_IS_E1(bp))
45229b42 8964 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 8965 else if (CHIP_IS_E1H(bp))
45229b42 8966 fw_file_name = FW_FILE_NAME_E1H;
f2e0899f
DK
8967 else if (CHIP_IS_E2(bp))
8968 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 8969 else {
6891dd25 8970 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
8971 return -EINVAL;
8972 }
94a78b79 8973
6891dd25 8974 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 8975
6891dd25 8976 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 8977 if (rc) {
6891dd25 8978 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
8979 goto request_firmware_exit;
8980 }
8981
8982 rc = bnx2x_check_firmware(bp);
8983 if (rc) {
6891dd25 8984 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
8985 goto request_firmware_exit;
8986 }
8987
8988 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8989
8990 /* Initialize the pointers to the init arrays */
8991 /* Blob */
8992 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8993
8994 /* Opcodes */
8995 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8996
8997 /* Offsets */
ab6ad5a4
EG
8998 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8999 be16_to_cpu_n);
94a78b79
VZ
9000
9001 /* STORMs firmware */
573f2035
EG
9002 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9003 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9004 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9005 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9006 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9007 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9008 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9009 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9010 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9011 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9012 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9013 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9014 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9015 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9016 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9017 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
9018 /* IRO */
9019 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
9020
9021 return 0;
ab6ad5a4 9022
523224a3
DK
9023iro_alloc_err:
9024 kfree(bp->init_ops_offsets);
94a78b79
VZ
9025init_offsets_alloc_err:
9026 kfree(bp->init_ops);
9027init_ops_alloc_err:
9028 kfree(bp->init_data);
9029request_firmware_exit:
9030 release_firmware(bp->firmware);
9031
9032 return rc;
9033}
9034
523224a3
DK
9035static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9036{
9037 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9038
523224a3
DK
9039#ifdef BCM_CNIC
9040 cid_count += CNIC_CID_MAX;
9041#endif
9042 return roundup(cid_count, QM_CID_ROUND);
9043}
f85582f8 9044
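/*
 * roundup(n, r) is ((n + r - 1) / r) * r for the multiples used here.
 * Assuming QM_CID_ROUND is 1024, a cid_count of e.g. 2312 rounds up to
 * 3072, so the QM is always programmed in whole blocks of CIDs.
 */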
a2fbb9ea
ET
9045static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9046 const struct pci_device_id *ent)
9047{
a2fbb9ea
ET
9048 struct net_device *dev = NULL;
9049 struct bnx2x *bp;
37f9ce62 9050 int pcie_width, pcie_speed;
523224a3
DK
9051 int rc, cid_count;
9052
f2e0899f
DK
9053 switch (ent->driver_data) {
9054 case BCM57710:
9055 case BCM57711:
9056 case BCM57711E:
9057 cid_count = FP_SB_MAX_E1x;
9058 break;
9059
9060 case BCM57712:
9061 case BCM57712E:
9062 cid_count = FP_SB_MAX_E2;
9063 break;
a2fbb9ea 9064
f2e0899f
DK
9065 default:
9066 pr_err("Unknown board_type (%ld), aborting\n",
9067 ent->driver_data);
 9068		return -ENODEV;
9069 }
9070
9071 cid_count += CNIC_CONTEXT_USE;
f85582f8 9072
a2fbb9ea 9073 /* dev zeroed in init_etherdev */
523224a3 9074 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9075 if (!dev) {
cdaa7cb8 9076 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9077 return -ENOMEM;
34f80b04 9078 }
a2fbb9ea 9079
a2fbb9ea 9080 bp = netdev_priv(dev);
7995c64e 9081 bp->msg_enable = debug;
a2fbb9ea 9082
df4770de
EG
9083 pci_set_drvdata(pdev, dev);
9084
523224a3
DK
9085 bp->l2_cid_count = cid_count;
9086
34f80b04 9087 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
9088 if (rc < 0) {
9089 free_netdev(dev);
9090 return rc;
9091 }
9092
34f80b04 9093 rc = bnx2x_init_bp(bp);
693fc0d1
EG
9094 if (rc)
9095 goto init_one_exit;
9096
523224a3
DK
9097 /* calc qm_cid_count */
9098 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9099
693fc0d1 9100 rc = register_netdev(dev);
34f80b04 9101 if (rc) {
693fc0d1 9102 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
9103 goto init_one_exit;
9104 }
9105
d6214d7a
DK
 9106	/* Configure interrupt mode: try to enable MSI-X/MSI if
9107 * needed, set bp->num_queues appropriately.
9108 */
9109 bnx2x_set_int_mode(bp);
9110
9111 /* Add all NAPI objects */
9112 bnx2x_add_all_napi(bp);
9113
37f9ce62 9114 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9115
cdaa7cb8
VZ
9116 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9117 " IRQ %d, ", board_info[ent->driver_data].name,
9118 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
f2e0899f
DK
9119 pcie_width,
9120 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9121 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9122 "5GHz (Gen2)" : "2.5GHz",
cdaa7cb8
VZ
9123 dev->base_addr, bp->pdev->irq);
9124 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9125
a2fbb9ea 9126 return 0;
34f80b04
EG
9127
9128init_one_exit:
9129 if (bp->regview)
9130 iounmap(bp->regview);
9131
9132 if (bp->doorbells)
9133 iounmap(bp->doorbells);
9134
9135 free_netdev(dev);
9136
9137 if (atomic_read(&pdev->enable_cnt) == 1)
9138 pci_release_regions(pdev);
9139
9140 pci_disable_device(pdev);
9141 pci_set_drvdata(pdev, NULL);
9142
9143 return rc;
a2fbb9ea
ET
9144}
9145
9146static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9147{
9148 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
9149 struct bnx2x *bp;
9150
9151 if (!dev) {
cdaa7cb8 9152 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
9153 return;
9154 }
228241eb 9155 bp = netdev_priv(dev);
a2fbb9ea 9156
a2fbb9ea
ET
9157 unregister_netdev(dev);
9158
d6214d7a
DK
9159 /* Delete all NAPI objects */
9160 bnx2x_del_all_napi(bp);
9161
9162 /* Disable MSI/MSI-X */
9163 bnx2x_disable_msi(bp);
f85582f8 9164
72fd0718
VZ
9165 /* Make sure RESET task is not scheduled before continuing */
9166 cancel_delayed_work_sync(&bp->reset_task);
9167
a2fbb9ea
ET
9168 if (bp->regview)
9169 iounmap(bp->regview);
9170
9171 if (bp->doorbells)
9172 iounmap(bp->doorbells);
9173
523224a3
DK
9174 bnx2x_free_mem_bp(bp);
9175
a2fbb9ea 9176 free_netdev(dev);
34f80b04
EG
9177
9178 if (atomic_read(&pdev->enable_cnt) == 1)
9179 pci_release_regions(pdev);
9180
a2fbb9ea
ET
9181 pci_disable_device(pdev);
9182 pci_set_drvdata(pdev, NULL);
9183}
9184
f8ef6e44
YG
9185static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9186{
9187 int i;
9188
9189 bp->state = BNX2X_STATE_ERROR;
9190
9191 bp->rx_mode = BNX2X_RX_MODE_NONE;
9192
9193 bnx2x_netif_stop(bp, 0);
c89af1a3 9194 netif_carrier_off(bp->dev);
f8ef6e44
YG
9195
9196 del_timer_sync(&bp->timer);
9197 bp->stats_state = STATS_STATE_DISABLED;
9198 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9199
9200 /* Release IRQs */
d6214d7a 9201 bnx2x_free_irq(bp);
f8ef6e44 9202
f8ef6e44
YG
9203 /* Free SKBs, SGEs, TPA pool and driver internals */
9204 bnx2x_free_skbs(bp);
523224a3 9205
54b9ddaa 9206 for_each_queue(bp, i)
f8ef6e44 9207 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9208
f8ef6e44
YG
9209 bnx2x_free_mem(bp);
9210
9211 bp->state = BNX2X_STATE_CLOSED;
9212
f8ef6e44
YG
9213 return 0;
9214}
9215
9216static void bnx2x_eeh_recover(struct bnx2x *bp)
9217{
9218 u32 val;
9219
9220 mutex_init(&bp->port.phy_mutex);
9221
9222 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9223 bp->link_params.shmem_base = bp->common.shmem_base;
9224 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9225
9226 if (!bp->common.shmem_base ||
9227 (bp->common.shmem_base < 0xA0000) ||
9228 (bp->common.shmem_base >= 0xC0000)) {
9229 BNX2X_DEV_INFO("MCP not active\n");
9230 bp->flags |= NO_MCP_FLAG;
9231 return;
9232 }
9233
9234 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9235 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9236 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9237 BNX2X_ERR("BAD MCP validity signature\n");
9238
9239 if (!BP_NOMCP(bp)) {
f2e0899f
DK
9240 bp->fw_seq =
9241 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9242 DRV_MSG_SEQ_NUMBER_MASK);
f8ef6e44
YG
9243 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9244 }
9245}
9246
493adb1f
WX
9247/**
9248 * bnx2x_io_error_detected - called when PCI error is detected
9249 * @pdev: Pointer to PCI device
9250 * @state: The current pci connection state
9251 *
9252 * This function is called after a PCI bus error affecting
9253 * this device has been detected.
9254 */
9255static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9256 pci_channel_state_t state)
9257{
9258 struct net_device *dev = pci_get_drvdata(pdev);
9259 struct bnx2x *bp = netdev_priv(dev);
9260
9261 rtnl_lock();
9262
9263 netif_device_detach(dev);
9264
07ce50e4
DN
9265 if (state == pci_channel_io_perm_failure) {
9266 rtnl_unlock();
9267 return PCI_ERS_RESULT_DISCONNECT;
9268 }
9269
493adb1f 9270 if (netif_running(dev))
f8ef6e44 9271 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
9272
9273 pci_disable_device(pdev);
9274
9275 rtnl_unlock();
9276
9277 /* Request a slot reset */
9278 return PCI_ERS_RESULT_NEED_RESET;
9279}
9280
9281/**
9282 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9283 * @pdev: Pointer to PCI device
9284 *
9285 * Restart the card from scratch, as if from a cold-boot.
9286 */
9287static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9288{
9289 struct net_device *dev = pci_get_drvdata(pdev);
9290 struct bnx2x *bp = netdev_priv(dev);
9291
9292 rtnl_lock();
9293
9294 if (pci_enable_device(pdev)) {
9295 dev_err(&pdev->dev,
9296 "Cannot re-enable PCI device after reset\n");
9297 rtnl_unlock();
9298 return PCI_ERS_RESULT_DISCONNECT;
9299 }
9300
9301 pci_set_master(pdev);
9302 pci_restore_state(pdev);
9303
9304 if (netif_running(dev))
9305 bnx2x_set_power_state(bp, PCI_D0);
9306
9307 rtnl_unlock();
9308
9309 return PCI_ERS_RESULT_RECOVERED;
9310}
9311
9312/**
9313 * bnx2x_io_resume - called when traffic can start flowing again
9314 * @pdev: Pointer to PCI device
9315 *
9316 * This callback is called when the error recovery driver tells us that
9317 * its OK to resume normal operation.
9318 */
9319static void bnx2x_io_resume(struct pci_dev *pdev)
9320{
9321 struct net_device *dev = pci_get_drvdata(pdev);
9322 struct bnx2x *bp = netdev_priv(dev);
9323
72fd0718 9324 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f2e0899f
DK
9325 printk(KERN_ERR "Handling parity error recovery. "
9326 "Try again later\n");
72fd0718
VZ
9327 return;
9328 }
9329
493adb1f
WX
9330 rtnl_lock();
9331
f8ef6e44
YG
9332 bnx2x_eeh_recover(bp);
9333
493adb1f 9334 if (netif_running(dev))
f8ef6e44 9335 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
9336
9337 netif_device_attach(dev);
9338
9339 rtnl_unlock();
9340}
9341
9342static struct pci_error_handlers bnx2x_err_handler = {
9343 .error_detected = bnx2x_io_error_detected,
356e2385
EG
9344 .slot_reset = bnx2x_io_slot_reset,
9345 .resume = bnx2x_io_resume,
493adb1f
WX
9346};
9347
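/*
 * AER drives these callbacks in a fixed order when a PCI error is seen:
 * error_detected() detaches the net device and unloads the NIC,
 * slot_reset() re-enables the function and restores config space after
 * the link reset, and resume() reloads the driver and re-attaches the
 * device once traffic may flow again.
 */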
a2fbb9ea 9348static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
9349 .name = DRV_MODULE_NAME,
9350 .id_table = bnx2x_pci_tbl,
9351 .probe = bnx2x_init_one,
9352 .remove = __devexit_p(bnx2x_remove_one),
9353 .suspend = bnx2x_suspend,
9354 .resume = bnx2x_resume,
9355 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
9356};
9357
9358static int __init bnx2x_init(void)
9359{
dd21ca6d
SG
9360 int ret;
9361
7995c64e 9362 pr_info("%s", version);
938cf541 9363
1cf167f2
EG
9364 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9365 if (bnx2x_wq == NULL) {
7995c64e 9366 pr_err("Cannot create workqueue\n");
1cf167f2
EG
9367 return -ENOMEM;
9368 }
9369
dd21ca6d
SG
9370 ret = pci_register_driver(&bnx2x_pci_driver);
9371 if (ret) {
7995c64e 9372 pr_err("Cannot register driver\n");
dd21ca6d
SG
9373 destroy_workqueue(bnx2x_wq);
9374 }
9375 return ret;
a2fbb9ea
ET
9376}
9377
9378static void __exit bnx2x_cleanup(void)
9379{
9380 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
9381
9382 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
9383}
9384
9385module_init(bnx2x_init);
9386module_exit(bnx2x_cleanup);
9387
993ac7b5
MC
9388#ifdef BCM_CNIC
9389
9390/* count denotes the number of new completions we have seen */
9391static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9392{
9393 struct eth_spe *spe;
9394
9395#ifdef BNX2X_STOP_ON_ERROR
9396 if (unlikely(bp->panic))
9397 return;
9398#endif
9399
9400 spin_lock_bh(&bp->spq_lock);
c2bff63f 9401 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
9402 bp->cnic_spq_pending -= count;
9403
993ac7b5 9404
c2bff63f
DK
9405 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9406 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9407 & SPE_HDR_CONN_TYPE) >>
9408 SPE_HDR_CONN_TYPE_SHIFT;
9409
9410 /* Set validation for iSCSI L2 client before sending SETUP
9411 * ramrod
9412 */
9413 if (type == ETH_CONNECTION_TYPE) {
9414 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9415 hdr.conn_and_cmd_data) >>
9416 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9417
9418 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9419 bnx2x_set_ctx_validation(&bp->context.
9420 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9421 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9422 }
9423
 9424		/* There may be no more than 8 L2 and COMMON SPEs and no more
 9425		 * than 8 L5 SPEs in flight.
9426 */
9427 if ((type == NONE_CONNECTION_TYPE) ||
9428 (type == ETH_CONNECTION_TYPE)) {
9429 if (!atomic_read(&bp->spq_left))
9430 break;
9431 else
9432 atomic_dec(&bp->spq_left);
9433 } else if (type == ISCSI_CONNECTION_TYPE) {
9434 if (bp->cnic_spq_pending >=
9435 bp->cnic_eth_dev.max_kwqe_pending)
9436 break;
9437 else
9438 bp->cnic_spq_pending++;
9439 } else {
9440 BNX2X_ERR("Unknown SPE type: %d\n", type);
9441 bnx2x_panic();
993ac7b5 9442 break;
c2bff63f 9443 }
993ac7b5
MC
9444
9445 spe = bnx2x_sp_get_next(bp);
9446 *spe = *bp->cnic_kwq_cons;
9447
993ac7b5
MC
9448 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9449 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9450
9451 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9452 bp->cnic_kwq_cons = bp->cnic_kwq;
9453 else
9454 bp->cnic_kwq_cons++;
9455 }
9456 bnx2x_sp_prod_update(bp);
9457 spin_unlock_bh(&bp->spq_lock);
9458}
9459
9460static int bnx2x_cnic_sp_queue(struct net_device *dev,
9461 struct kwqe_16 *kwqes[], u32 count)
9462{
9463 struct bnx2x *bp = netdev_priv(dev);
9464 int i;
9465
9466#ifdef BNX2X_STOP_ON_ERROR
9467 if (unlikely(bp->panic))
9468 return -EIO;
9469#endif
9470
9471 spin_lock_bh(&bp->spq_lock);
9472
9473 for (i = 0; i < count; i++) {
9474 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9475
9476 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9477 break;
9478
9479 *bp->cnic_kwq_prod = *spe;
9480
9481 bp->cnic_kwq_pending++;
9482
9483 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9484 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
9485 spe->data.update_data_addr.hi,
9486 spe->data.update_data_addr.lo,
993ac7b5
MC
9487 bp->cnic_kwq_pending);
9488
9489 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9490 bp->cnic_kwq_prod = bp->cnic_kwq;
9491 else
9492 bp->cnic_kwq_prod++;
9493 }
9494
9495 spin_unlock_bh(&bp->spq_lock);
9496
9497 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9498 bnx2x_cnic_sp_post(bp, 0);
9499
9500 return i;
9501}
9502
9503static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9504{
9505 struct cnic_ops *c_ops;
9506 int rc = 0;
9507
9508 mutex_lock(&bp->cnic_mutex);
9509 c_ops = bp->cnic_ops;
9510 if (c_ops)
9511 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9512 mutex_unlock(&bp->cnic_mutex);
9513
9514 return rc;
9515}
9516
9517static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9518{
9519 struct cnic_ops *c_ops;
9520 int rc = 0;
9521
9522 rcu_read_lock();
9523 c_ops = rcu_dereference(bp->cnic_ops);
9524 if (c_ops)
9525 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9526 rcu_read_unlock();
9527
9528 return rc;
9529}
9530
9531/*
9532 * for commands that have no data
9533 */
9f6c9258 9534int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
9535{
9536 struct cnic_ctl_info ctl = {0};
9537
9538 ctl.cmd = cmd;
9539
9540 return bnx2x_cnic_ctl_send(bp, &ctl);
9541}
9542
9543static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9544{
9545 struct cnic_ctl_info ctl;
9546
9547 /* first we tell CNIC and only then we count this as a completion */
9548 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9549 ctl.data.comp.cid = cid;
9550
9551 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 9552 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
9553}
9554
9555static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9556{
9557 struct bnx2x *bp = netdev_priv(dev);
9558 int rc = 0;
9559
9560 switch (ctl->cmd) {
9561 case DRV_CTL_CTXTBL_WR_CMD: {
9562 u32 index = ctl->data.io.offset;
9563 dma_addr_t addr = ctl->data.io.dma_addr;
9564
9565 bnx2x_ilt_wr(bp, index, addr);
9566 break;
9567 }
9568
c2bff63f
DK
9569 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9570 int count = ctl->data.credit.credit_count;
993ac7b5
MC
9571
9572 bnx2x_cnic_sp_post(bp, count);
9573 break;
9574 }
9575
9576 /* rtnl_lock is held. */
9577 case DRV_CTL_START_L2_CMD: {
9578 u32 cli = ctl->data.ring.client_id;
9579
523224a3
DK
9580 /* Set iSCSI MAC address */
9581 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9582
9583 mmiowb();
9584 barrier();
9585
 9586		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
 9587		 * because it's the only way for the UIO Client to accept
 9588		 * them: in non-promiscuous mode only one Client per
 9589		 * function will receive multicast packets (the leading
 9590		 * Client, in our case).
9591 */
9592 bnx2x_rxq_set_mac_filters(bp, cli,
9593 BNX2X_ACCEPT_UNICAST |
9594 BNX2X_ACCEPT_BROADCAST |
9595 BNX2X_ACCEPT_ALL_MULTICAST);
9596 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9597
993ac7b5
MC
9598 break;
9599 }
9600
9601 /* rtnl_lock is held. */
9602 case DRV_CTL_STOP_L2_CMD: {
9603 u32 cli = ctl->data.ring.client_id;
9604
523224a3
DK
9605 /* Stop accepting on iSCSI L2 ring */
9606 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9607 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9608
9609 mmiowb();
9610 barrier();
9611
9612 /* Unset iSCSI L2 MAC */
9613 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
993ac7b5
MC
9614 break;
9615 }
c2bff63f
DK
9616 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9617 int count = ctl->data.credit.credit_count;
9618
9619 smp_mb__before_atomic_inc();
9620 atomic_add(count, &bp->spq_left);
9621 smp_mb__after_atomic_inc();
9622 break;
9623 }
993ac7b5
MC
9624
9625 default:
9626 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9627 rc = -EINVAL;
9628 }
9629
9630 return rc;
9631}
9632
9f6c9258 9633void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
9634{
9635 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9636
9637 if (bp->flags & USING_MSIX_FLAG) {
9638 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9639 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9640 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9641 } else {
9642 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9643 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9644 }
f2e0899f
DK
9645 if (CHIP_IS_E2(bp))
9646 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9647 else
9648 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9649
993ac7b5 9650 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 9651 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
993ac7b5
MC
9652 cp->irq_arr[1].status_blk = bp->def_status_blk;
9653 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 9654 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
9655
9656 cp->num_irq = 2;
9657}
9658
9659static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9660 void *data)
9661{
9662 struct bnx2x *bp = netdev_priv(dev);
9663 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9664
9665 if (ops == NULL)
9666 return -EINVAL;
9667
9668 if (atomic_read(&bp->intr_sem) != 0)
9669 return -EBUSY;
9670
9671 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9672 if (!bp->cnic_kwq)
9673 return -ENOMEM;
9674
9675 bp->cnic_kwq_cons = bp->cnic_kwq;
9676 bp->cnic_kwq_prod = bp->cnic_kwq;
9677 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9678
9679 bp->cnic_spq_pending = 0;
9680 bp->cnic_kwq_pending = 0;
9681
9682 bp->cnic_data = data;
9683
9684 cp->num_irq = 0;
9685 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 9686 cp->iro_arr = bp->iro_arr;
993ac7b5 9687
993ac7b5 9688 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 9689
993ac7b5
MC
9690 rcu_assign_pointer(bp->cnic_ops, ops);
9691
9692 return 0;
9693}
9694
9695static int bnx2x_unregister_cnic(struct net_device *dev)
9696{
9697 struct bnx2x *bp = netdev_priv(dev);
9698 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9699
9700 mutex_lock(&bp->cnic_mutex);
9701 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9702 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9703 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9704 }
9705 cp->drv_state = 0;
9706 rcu_assign_pointer(bp->cnic_ops, NULL);
9707 mutex_unlock(&bp->cnic_mutex);
9708 synchronize_rcu();
9709 kfree(bp->cnic_kwq);
9710 bp->cnic_kwq = NULL;
9711
9712 return 0;
9713}
9714
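/*
 * Teardown note: bp->cnic_ops is published with rcu_assign_pointer() and
 * read under rcu_read_lock()/rcu_dereference() in bnx2x_cnic_ctl_send_bh(),
 * so bnx2x_unregister_cnic() must clear the pointer and then call
 * synchronize_rcu() before freeing cnic_kwq.  The generic shape:
 *
 *	rcu_assign_pointer(gp, NULL);
 *	synchronize_rcu();	// wait out every in-flight reader
 *	kfree(old);		// now safe; no reader can still reach it
 */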
9715struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9716{
9717 struct bnx2x *bp = netdev_priv(dev);
9718 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9719
9720 cp->drv_owner = THIS_MODULE;
9721 cp->chip_id = CHIP_ID(bp);
9722 cp->pdev = bp->pdev;
9723 cp->io_base = bp->regview;
9724 cp->io_base2 = bp->doorbells;
9725 cp->max_kwqe_pending = 8;
523224a3 9726 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
c2bff63f
DK
9727 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9728 bnx2x_cid_ilt_lines(bp);
993ac7b5 9729 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 9730 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
993ac7b5
MC
9731 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9732 cp->drv_ctl = bnx2x_drv_ctl;
9733 cp->drv_register_cnic = bnx2x_register_cnic;
9734 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
c2bff63f
DK
9735 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9736 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9737
9738 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9739 "starting cid %d\n",
9740 cp->ctx_blk_size,
9741 cp->ctx_tbl_offset,
9742 cp->ctx_tbl_len,
9743 cp->starting_cid);
993ac7b5
MC
9744 return cp;
9745}
9746EXPORT_SYMBOL(bnx2x_cnic_probe);
9747
9748#endif /* BCM_CNIC */
94a78b79 9749