/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

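/*
 * Illustrative usage of the parameters above (a sketch, not part of the
 * original source): forcing plain MSI and disabling TPA at load time
 * would look like
 *
 *	modprobe bnx2x int_mode=2 disable_tpa=1
 */
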
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

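/*
 * The storm_memset_*() helpers below program per-function state in the
 * internal memories of the four STORM processors (X/T/C/USTORM) with plain
 * GRC register writes: zeroing statistics areas, copying small
 * configuration structures and publishing DMA addresses.
 */
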
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

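/*
 * For illustration (a sketch, mirroring bnx2x_prep_dmae_with_comp() below):
 * an opcode for a PCI -> GRC copy that posts its completion word back over
 * PCI is composed as
 *
 *	u32 op = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 */
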
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

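/*
 * Worked example (illustrative; the chunk size is whatever
 * DMAE_LEN32_WR_MAX(bp) evaluates to - assume 0x2000 dwords here): a len
 * of 0x5000 dwords is issued as two full 0x2000-dword writes followed by
 * a final 0x1000-dword write, the byte offset advancing by
 * dmae_wr_max * 4 after each chunk.
 */
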
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

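/*
 * Dump the MCP scratchpad trace: the current mark is published just below
 * the shmem base, and the circular buffer is printed in two passes (from
 * the mark to the buffer end, then from the buffer start back up to the
 * mark).
 */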
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

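/*
 * Note: bp->common.int_block selects which interrupt block the enable and
 * disable paths program - the HC (host coalescing) block or the IGU - so
 * callers go through bnx2x_int_enable()/bnx2x_int_disable() rather than
 * touching either block directly.
 */
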
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

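/*
 * Teardown ordering above: raise intr_sem first so new ISR invocations
 * bail out early, optionally mask the hardware, then synchronize_irq()
 * every vector and flush the slowpath workqueue so nothing is still
 * running when the caller proceeds.
 */
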
/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

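/*
 * Unlike bnx2x_acquire_hw_lock() further down, which polls the lock
 * register for up to five seconds, this trylock variant makes a single
 * attempt and lets the caller decide how to back off.
 */
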
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->spq_left);
	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}

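/*
 * Each ramrod completion above pairs a command with the queue state that
 * issued it (OPENING -> OPEN, HALTING -> HALTED, TERMINATING ->
 * TERMINATED) and then returns the consumed slot to the slowpath queue by
 * bumping bp->spq_left.
 */
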
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

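/*
 * The ISR above demultiplexes the acknowledged status word: one bit per
 * fast-path queue (which only schedules NAPI), a bit for the CNIC handler
 * when BCM_CNIC is set, and bit 0 for the slowpath, which is deferred to
 * the sp_task workqueue.
 */
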
/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

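/*
 * Illustrative call (the argument values are hypothetical): floating a
 * GPIO pin on the current port would look like
 *
 *	bnx2x_set_gpio(bp, gpio_num,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, BP_PORT(bp));
 */
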
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

a22f0788
YR
1733int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1734{
1735 u32 sel_phy_idx = 0;
1736 if (bp->link_vars.link_up) {
1737 sel_phy_idx = EXT_PHY1;
1738 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1739 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1740 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1741 sel_phy_idx = EXT_PHY2;
1742 } else {
1743
1744 switch (bnx2x_phy_selection(&bp->link_params)) {
1745 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1746 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1747 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1748 sel_phy_idx = EXT_PHY1;
1749 break;
1750 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1751 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1752 sel_phy_idx = EXT_PHY2;
1753 break;
1754 }
1755 }
1756 /*
1757 * The selected active PHY is always after swapping (in case PHY
1758 * swapping is enabled). So when swapping is enabled, we need to reverse
1759 * the configuration.
1760 */
1761
1762 if (bp->link_params.multi_phy_config &
1763 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1764 if (sel_phy_idx == EXT_PHY1)
1765 sel_phy_idx = EXT_PHY2;
1766 else if (sel_phy_idx == EXT_PHY2)
1767 sel_phy_idx = EXT_PHY1;
1768 }
1769 return LINK_CONFIG_IDX(sel_phy_idx);
1770}
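/*
 * The swap-reversal step above, condensed (a sketch; the helper name is
 * hypothetical). With PORT_HW_CFG_PHY_SWAPPED_ENABLED set, EXT_PHY1 and
 * EXT_PHY2 trade places before the configuration index is derived.
 */
static int example_cfg_idx(u32 sel_phy_idx, bool swapped)
{
	if (swapped)
		sel_phy_idx = (sel_phy_idx == EXT_PHY1) ? EXT_PHY2 : EXT_PHY1;
	return LINK_CONFIG_IDX(sel_phy_idx);
}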
1771
9f6c9258 1772void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1773{
a22f0788 1774 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1775 switch (bp->link_vars.ieee_fc &
1776 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1777 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1778 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1779 ADVERTISED_Pause);
c18487ee 1780 break;
356e2385 1781
c18487ee 1782 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1783 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1784 ADVERTISED_Pause);
c18487ee 1785 break;
356e2385 1786
c18487ee 1787 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1788 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1789 break;
356e2385 1790
c18487ee 1791 default:
a22f0788 1792 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1793 ADVERTISED_Pause);
c18487ee
YR
1794 break;
1795 }
1796}
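/*
 * The switch above, condensed (sketch): the negotiated ieee_fc pause
 * bits map onto the ethtool advertising mask as
 *
 *   PAUSE_NONE        ->  advertising &= ~(Pause | Asym_Pause)
 *   PAUSE_BOTH        ->  advertising |=  (Pause | Asym_Pause)
 *   PAUSE_ASYMMETRIC  ->  advertising |=   Asym_Pause
 *   anything else     ->  advertising &= ~(Pause | Asym_Pause)
 */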
f1410647 1797
9f6c9258 1798u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1799{
19680c48
EG
1800 if (!BP_NOMCP(bp)) {
1801 u8 rc;
a22f0788
YR
1802 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1803 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1804 /* Initialize link parameters structure variables */
8c99e7b0
YR
1805 /* It is recommended to turn off RX FC for jumbo frames
1806 for better performance */
f2e0899f 1807 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1808 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1809 else
c0700f90 1810 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1811
4a37fb66 1812 bnx2x_acquire_phy_lock(bp);
b5bf9068 1813
a22f0788 1814 if (load_mode == LOAD_DIAG) {
de6eae1f 1815 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1816 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1817 }
b5bf9068 1818
19680c48 1819 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1820
4a37fb66 1821 bnx2x_release_phy_lock(bp);
a2fbb9ea 1822
3c96c68b
EG
1823 bnx2x_calc_fc_adv(bp);
1824
b5bf9068
EG
1825 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1826 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1827 bnx2x_link_report(bp);
b5bf9068 1828 }
a22f0788 1829 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1830 return rc;
1831 }
f5372251 1832 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 1833 return -EINVAL;
a2fbb9ea
ET
1834}
1835
9f6c9258 1836void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1837{
19680c48 1838 if (!BP_NOMCP(bp)) {
4a37fb66 1839 bnx2x_acquire_phy_lock(bp);
54c2fb78 1840 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1841 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1842 bnx2x_release_phy_lock(bp);
a2fbb9ea 1843
19680c48
EG
1844 bnx2x_calc_fc_adv(bp);
1845 } else
f5372251 1846 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 1847}
a2fbb9ea 1848
c18487ee
YR
1849static void bnx2x__link_reset(struct bnx2x *bp)
1850{
19680c48 1851 if (!BP_NOMCP(bp)) {
4a37fb66 1852 bnx2x_acquire_phy_lock(bp);
589abe3a 1853 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1854 bnx2x_release_phy_lock(bp);
19680c48 1855 } else
f5372251 1856 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 1857}
a2fbb9ea 1858
a22f0788 1859u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1860{
2145a920 1861 u8 rc = 0;
a2fbb9ea 1862
2145a920
VZ
1863 if (!BP_NOMCP(bp)) {
1864 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1865 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1866 is_serdes);
2145a920
VZ
1867 bnx2x_release_phy_lock(bp);
1868 } else
1869 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 1870
c18487ee
YR
1871 return rc;
1872}
a2fbb9ea 1873
8a1c38d1 1874static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1875{
8a1c38d1
EG
1876 u32 r_param = bp->link_vars.line_speed / 8;
1877 u32 fair_periodic_timeout_usec;
1878 u32 t_fair;
34f80b04 1879
8a1c38d1
EG
1880 memset(&(bp->cmng.rs_vars), 0,
1881 sizeof(struct rate_shaping_vars_per_port));
1882 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1883
8a1c38d1
EG
1884 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1885 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1886
8a1c38d1
EG
1887 /* this is the threshold below which no timer arming will occur;
1888 the 1.25 coefficient makes the threshold a little bigger
1889 than the real time, to compensate for timer inaccuracy */
1890 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1891 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1892
8a1c38d1
EG
1893 /* resolution of fairness timer */
1894 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1895 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1896 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1897
8a1c38d1
EG
1898 /* this is the threshold below which we won't arm the timer anymore */
1899 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1900
8a1c38d1
EG
1901 /* we multiply by 1e3/8 to get bytes/msec.
1902 We don't want the credits to exceed
1903 t_fair*FAIR_MEM (the algorithm resolution) */
1904 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1905 /* since each tick is 4 usec */
1906 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1907}
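/*
 * Worked example (a sketch, assuming RS_PERIODIC_TIMEOUT_USEC is the
 * 100 usec period the "100 usec in SDM ticks = 25" comment implies):
 * at 10G, line_speed = 10000 Mbps so r_param = 10000 / 8 = 1250
 * bytes/usec, and
 *
 *   rs_periodic_timeout = 100 / 4              = 25 SDM ticks
 *   rs_threshold        = (100 * 1250 * 5) / 4 = 156250 bytes
 *   t_fair              = T_FAIR_COEF / 10000  = 1000 usec
 *
 * matching the "for 10G it is 1000usec" comment above.
 */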
1908
2691d51d
EG
1909/* Calculates the sum of vn_min_rates.
1910 It's needed for further normalizing of the min_rates.
1911 The result, stored in bp->vn_weight_sum, is:
1912 sum of vn_min_rates.
1913 or
1914 0 - if all the min_rates are 0.
1915 In the latter case the fairness algorithm should be deactivated.
1916 If not all min_rates are zero then those that are zeroes will be set to 1.
1917 */
1918static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1919{
1920 int all_zero = 1;
2691d51d
EG
1921 int vn;
1922
1923 bp->vn_weight_sum = 0;
1924 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1925 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1926 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1927 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1928
1929 /* Skip hidden vns */
1930 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1931 continue;
1932
1933 /* If min rate is zero - set it to 1 */
1934 if (!vn_min_rate)
1935 vn_min_rate = DEF_MIN_RATE;
1936 else
1937 all_zero = 0;
1938
1939 bp->vn_weight_sum += vn_min_rate;
1940 }
1941
1942 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1943 if (all_zero) {
1944 bp->cmng.flags.cmng_enables &=
1945 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1946 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
1947 " fairness will be disabled\n");
1948 } else
1949 bp->cmng.flags.cmng_enables |=
1950 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1951}
1952
f2e0899f 1953static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1954{
1955 struct rate_shaping_vars_per_vn m_rs_vn;
1956 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1957 u32 vn_cfg = bp->mf_config[vn];
1958 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1959 u16 vn_min_rate, vn_max_rate;
1960 int i;
1961
1962 /* If function is hidden - set min and max to zeroes */
1963 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1964 vn_min_rate = 0;
1965 vn_max_rate = 0;
1966
1967 } else {
1968 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1969 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1 1970 /* If min rate is zero - set it to 1 */
f2e0899f 1971 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
1972 vn_min_rate = DEF_MIN_RATE;
1973 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1974 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1975 }
f85582f8 1976
8a1c38d1 1977 DP(NETIF_MSG_IFUP,
b015e3d1 1978 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1979 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1980
1981 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1982 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1983
1984 /* global vn counter - maximal Mbps for this vn */
1985 m_rs_vn.vn_counter.rate = vn_max_rate;
1986
1987 /* quota - number of bytes transmitted in this period */
1988 m_rs_vn.vn_counter.quota =
1989 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1990
8a1c38d1 1991 if (bp->vn_weight_sum) {
34f80b04
EG
1992 /* credit for each period of the fairness algorithm:
1993 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
1994 vn_weight_sum should not be larger than 10000, thus
1995 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1996 than zero */
34f80b04 1997 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
1998 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1999 (8 * bp->vn_weight_sum))),
2000 (bp->cmng.fair_vars.fair_threshold * 2));
2001 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2002 m_fair_vn.vn_credit_delta);
2003 }
2004
34f80b04
EG
2005 /* Store it to internal memory */
2006 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2007 REG_WR(bp, BAR_XSTRORM_INTMEM +
2008 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2009 ((u32 *)(&m_rs_vn))[i]);
2010
2011 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2012 REG_WR(bp, BAR_XSTRORM_INTMEM +
2013 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2014 ((u32 *)(&m_fair_vn))[i]);
2015}
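/*
 * Worked example (a sketch): a MIN_BW field of 25 gives vn_min_rate =
 * 25 * 100 = 2500 Mbps and a MAX_BW field of 50 gives vn_max_rate =
 * 5000 Mbps. With the 100 usec rate-shaping period the quota is then
 *
 *   quota = (5000 Mbit/sec * 100 usec) / 8 = 62500 bytes per period
 */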
f85582f8 2016
523224a3
DK
2017static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2018{
2019 if (CHIP_REV_IS_SLOW(bp))
2020 return CMNG_FNS_NONE;
fb3bff17 2021 if (IS_MF(bp))
523224a3
DK
2022 return CMNG_FNS_MINMAX;
2023
2024 return CMNG_FNS_NONE;
2025}
2026
2027static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2028{
2029 int vn;
2030
2031 if (BP_NOMCP(bp))
2032 return; /* what should be the default value in this case? */
2033
2034 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2035 int /*abs*/func = 2*vn + BP_PORT(bp);
f2e0899f 2036 bp->mf_config[vn] =
523224a3
DK
2037 MF_CFG_RD(bp, func_mf_config[func].config);
2038 }
2039}
2040
2041static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2042{
2043
2044 if (cmng_type == CMNG_FNS_MINMAX) {
2045 int vn;
2046
2047 /* clear cmng_enables */
2048 bp->cmng.flags.cmng_enables = 0;
2049
2050 /* read mf conf from shmem */
2051 if (read_cfg)
2052 bnx2x_read_mf_cfg(bp);
2053
2054 /* Init rate shaping and fairness contexts */
2055 bnx2x_init_port_minmax(bp);
2056
2057 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2058 bnx2x_calc_vn_weight_sum(bp);
2059
2060 /* calculate and set min-max rate for each vn */
2061 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2062 bnx2x_init_vn_minmax(bp, vn);
2063
2064 /* always enable rate shaping and fairness */
2065 bp->cmng.flags.cmng_enables |=
2066 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2067 if (!bp->vn_weight_sum)
2068 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2069 " fairness will be disabled\n");
2070 return;
2071 }
2072
2073 /* rate shaping and fairness are disabled */
2074 DP(NETIF_MSG_IFUP,
2075 "rate shaping and fairness are disabled\n");
2076}
34f80b04 2077
523224a3
DK
2078static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2079{
2080 int port = BP_PORT(bp);
2081 int func;
2082 int vn;
2083
2084 /* Set the attention towards other drivers on the same port */
2085 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2086 if (vn == BP_E1HVN(bp))
2087 continue;
2088
2089 func = ((vn << 1) | port);
2090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2091 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2092 }
2093}
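/*
 * Function numbering used above (sketch): func = (vn << 1) | port, so
 * on port 1 the vn 2 driver is signalled through general attention
 * register LINK_SYNC_ATTENTION_BIT_FUNC_0 + 5.
 */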
8a1c38d1 2094
c18487ee
YR
2095/* This function is called upon link interrupt */
2096static void bnx2x_link_attn(struct bnx2x *bp)
2097{
d9e8b185 2098 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2099 /* Make sure that we are synced with the current statistics */
2100 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2101
c18487ee 2102 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2103
bb2a0f7a
YG
2104 if (bp->link_vars.link_up) {
2105
1c06328c 2106 /* dropless flow control */
f2e0899f 2107 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2108 int port = BP_PORT(bp);
2109 u32 pause_enabled = 0;
2110
2111 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2112 pause_enabled = 1;
2113
2114 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2115 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2116 pause_enabled);
2117 }
2118
bb2a0f7a
YG
2119 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2120 struct host_port_stats *pstats;
2121
2122 pstats = bnx2x_sp(bp, port_stats);
2123 /* reset old bmac stats */
2124 memset(&(pstats->mac_stx[0]), 0,
2125 sizeof(struct mac_stx));
2126 }
f34d28ea 2127 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2128 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2129 }
2130
d9e8b185
VZ
2131 /* indicate link status only if link status actually changed */
2132 if (prev_link_status != bp->link_vars.link_status)
2133 bnx2x_link_report(bp);
34f80b04 2134
f2e0899f
DK
2135 if (IS_MF(bp))
2136 bnx2x_link_sync_notify(bp);
34f80b04 2137
f2e0899f
DK
2138 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2139 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2140
f2e0899f
DK
2141 if (cmng_fns != CMNG_FNS_NONE) {
2142 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2143 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2144 } else
2145 /* rate shaping and fairness are disabled */
2146 DP(NETIF_MSG_IFUP,
2147 "single function mode without fairness\n");
34f80b04 2148 }
c18487ee 2149}
a2fbb9ea 2150
9f6c9258 2151void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2152{
f34d28ea 2153 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2154 return;
a2fbb9ea 2155
c18487ee 2156 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2157
bb2a0f7a
YG
2158 if (bp->link_vars.link_up)
2159 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2160 else
2161 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2162
f2e0899f
DK
2163 /* the link status update could be the result of a DCC event,
2164 hence re-read the shmem mf configuration */
2165 bnx2x_read_mf_cfg(bp);
2691d51d 2166
c18487ee
YR
2167 /* indicate link status */
2168 bnx2x_link_report(bp);
a2fbb9ea 2169}
a2fbb9ea 2170
34f80b04
EG
2171static void bnx2x_pmf_update(struct bnx2x *bp)
2172{
2173 int port = BP_PORT(bp);
2174 u32 val;
2175
2176 bp->port.pmf = 1;
2177 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2178
2179 /* enable nig attention */
2180 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2181 if (bp->common.int_block == INT_BLOCK_HC) {
2182 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2183 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2184 } else if (CHIP_IS_E2(bp)) {
2185 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2186 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2187 }
bb2a0f7a
YG
2188
2189 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2190}
2191
c18487ee 2192/* end of Link */
a2fbb9ea
ET
2193
2194/* slow path */
2195
2196/*
2197 * General service functions
2198 */
2199
2691d51d 2200/* send the MCP a request, block until there is a reply */
a22f0788 2201u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2202{
f2e0899f 2203 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2204 u32 seq = ++bp->fw_seq;
2205 u32 rc = 0;
2206 u32 cnt = 1;
2207 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2208
c4ff7cbf 2209 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2210 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2211 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2212
2691d51d
EG
2213 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2214
2215 do {
2216 /* let the FW do its magic ... */
2217 msleep(delay);
2218
f2e0899f 2219 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2220
c4ff7cbf
EG
2221 /* Give the FW up to 5 seconds (500*10ms) */
2222 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2223
2224 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2225 cnt*delay, rc, seq);
2226
2227 /* is this a reply to our command? */
2228 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2229 rc &= FW_MSG_CODE_MASK;
2230 else {
2231 /* FW BUG! */
2232 BNX2X_ERR("FW failed to respond!\n");
2233 bnx2x_fw_dump(bp);
2234 rc = 0;
2235 }
c4ff7cbf 2236 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2237
2238 return rc;
2239}
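/*
 * Typical call pattern (a sketch; the error policy here is
 * hypothetical): pass a DRV_MSG_CODE_* opcode, get back the masked
 * FW_MSG_CODE_* reply, or 0 if the MCP never echoed the sequence.
 */
static int example_notify_mcp_dcc_ok(struct bnx2x *bp)
{
	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);

	return rc ? 0 : -EBUSY;
}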
2240
523224a3 2241/* must be called under rtnl_lock */
8d96286a 2242static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2243{
523224a3 2244 u32 mask = (1 << cl_id);
2691d51d 2245
523224a3
DK
2246 /* initial setting is BNX2X_ACCEPT_NONE */
2247 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2248 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2249 u8 unmatched_unicast = 0;
2691d51d 2250
523224a3
DK
2251 if (filters & BNX2X_PROMISCUOUS_MODE) {
2252 /* promiscuous - accept all, drop none */
2253 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2254 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2255 }
2256 if (filters & BNX2X_ACCEPT_UNICAST) {
2257 /* accept matched ucast */
2258 drop_all_ucast = 0;
2259 }
2260 if (filters & BNX2X_ACCEPT_MULTICAST) {
2261 /* accept matched mcast */
2262 drop_all_mcast = 0;
2263 }
2264 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2265 /* accept all ucast */
2266 drop_all_ucast = 0;
2267 accp_all_ucast = 1;
2268 }
2269 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2270 /* accept all mcast */
2271 drop_all_mcast = 0;
2272 accp_all_mcast = 1;
2273 }
2274 if (filters & BNX2X_ACCEPT_BROADCAST) {
2275 /* accept (all) bcast */
2276 drop_all_bcast = 0;
2277 accp_all_bcast = 1;
2278 }
2691d51d 2279
523224a3
DK
2280 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2281 bp->mac_filters.ucast_drop_all | mask :
2282 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2283
523224a3
DK
2284 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2285 bp->mac_filters.mcast_drop_all | mask :
2286 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2287
523224a3
DK
2288 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2289 bp->mac_filters.bcast_drop_all | mask :
2290 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2291
523224a3
DK
2292 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2293 bp->mac_filters.ucast_accept_all | mask :
2294 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2295
523224a3
DK
2296 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2297 bp->mac_filters.mcast_accept_all | mask :
2298 bp->mac_filters.mcast_accept_all & ~mask;
2299
2300 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2301 bp->mac_filters.bcast_accept_all | mask :
2302 bp->mac_filters.bcast_accept_all & ~mask;
2303
2304 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2305 bp->mac_filters.unmatched_unicast | mask :
2306 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2307}
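/*
 * Usage sketch (the wrapper is hypothetical): putting a client into
 * promiscuous mode clears every drop-all bit and sets every accept-all
 * bit for that cl_id, per the flag handling above.
 */
static void example_set_promisc(struct bnx2x *bp, u16 cl_id)
{
	bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
}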
2308
8d96286a 2309static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2310{
030f3356
DK
2311 struct tstorm_eth_function_common_config tcfg = {0};
2312 u16 rss_flgs;
2691d51d 2313
030f3356
DK
2314 /* tpa */
2315 if (p->func_flgs & FUNC_FLG_TPA)
2316 tcfg.config_flags |=
2317 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2318
030f3356
DK
2319 /* set rss flags */
2320 rss_flgs = (p->rss->mode <<
2321 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2322
2323 if (p->rss->cap & RSS_IPV4_CAP)
2324 rss_flgs |= RSS_IPV4_CAP_MASK;
2325 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2326 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2327 if (p->rss->cap & RSS_IPV6_CAP)
2328 rss_flgs |= RSS_IPV6_CAP_MASK;
2329 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2330 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2331
2332 tcfg.config_flags |= rss_flgs;
2333 tcfg.rss_result_mask = p->rss->result_mask;
2334
2335 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2336
523224a3
DK
2337 /* Enable the function in the FW */
2338 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2339 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2340
523224a3
DK
2341 /* statistics */
2342 if (p->func_flgs & FUNC_FLG_STATS) {
2343 struct stats_indication_flags stats_flags = {0};
2344 stats_flags.collect_eth = 1;
2691d51d 2345
523224a3
DK
2346 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2347 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2348
523224a3
DK
2349 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2350 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2351
523224a3
DK
2352 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2353 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2354
523224a3
DK
2355 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2356 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2357 }
2358
523224a3
DK
2359 /* spq */
2360 if (p->func_flgs & FUNC_FLG_SPQ) {
2361 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2362 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2363 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2364 }
2691d51d
EG
2365}
2366
523224a3
DK
2367static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2368 struct bnx2x_fastpath *fp)
28912902 2369{
523224a3 2370 u16 flags = 0;
28912902 2371
523224a3
DK
2372 /* calculate queue flags */
2373 flags |= QUEUE_FLG_CACHE_ALIGN;
2374 flags |= QUEUE_FLG_HC;
fb3bff17 2375 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
28912902 2376
523224a3
DK
2377 flags |= QUEUE_FLG_VLAN;
2378 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2379
2380 if (!fp->disable_tpa)
2381 flags |= QUEUE_FLG_TPA;
2382
2383 flags |= QUEUE_FLG_STATS;
2384
2385 return flags;
2386}
2387
2388static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2389 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2390 struct bnx2x_rxq_init_params *rxq_init)
2391{
2392 u16 max_sge = 0;
2393 u16 sge_sz = 0;
2394 u16 tpa_agg_size = 0;
2395
2396 /* calculate queue flags */
2397 u16 flags = bnx2x_get_cl_flags(bp, fp);
2398
2399 if (!fp->disable_tpa) {
2400 pause->sge_th_hi = 250;
2401 pause->sge_th_lo = 150;
2402 tpa_agg_size = min_t(u32,
2403 (min_t(u32, 8, MAX_SKB_FRAGS) *
2404 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2405 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2406 SGE_PAGE_SHIFT;
2407 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2408 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2409 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2410 0xffff);
2411 }
2412
2413 /* pause - not for e1 */
2414 if (!CHIP_IS_E1(bp)) {
2415 pause->bd_th_hi = 350;
2416 pause->bd_th_lo = 250;
2417 pause->rcq_th_hi = 350;
2418 pause->rcq_th_lo = 250;
2419 pause->sge_th_hi = 0;
2420 pause->sge_th_lo = 0;
2421 pause->pri_map = 1;
2422 }
2423
2424 /* rxq setup */
2425 rxq_init->flags = flags;
2426 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2427 rxq_init->dscr_map = fp->rx_desc_mapping;
2428 rxq_init->sge_map = fp->rx_sge_mapping;
2429 rxq_init->rcq_map = fp->rx_comp_mapping;
2430 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2431 rxq_init->mtu = bp->dev->mtu;
2432 rxq_init->buf_sz = bp->rx_buf_size;
2433 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2434 rxq_init->cl_id = fp->cl_id;
2435 rxq_init->spcl_id = fp->cl_id;
2436 rxq_init->stat_id = fp->cl_id;
2437 rxq_init->tpa_agg_sz = tpa_agg_size;
2438 rxq_init->sge_buf_sz = sge_sz;
2439 rxq_init->max_sges_pkt = max_sge;
2440 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2441 rxq_init->fw_sb_id = fp->fw_sb_id;
2442
2443 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2444
2445 rxq_init->cid = HW_CID(bp, fp->cid);
2446
2447 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2448}
2449
2450static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2451 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2452{
2453 u16 flags = bnx2x_get_cl_flags(bp, fp);
2454
2455 txq_init->flags = flags;
2456 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2457 txq_init->dscr_map = fp->tx_desc_mapping;
2458 txq_init->stat_id = fp->cl_id;
2459 txq_init->cid = HW_CID(bp, fp->cid);
2460 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2461 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2462 txq_init->fw_sb_id = fp->fw_sb_id;
2463 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2464}
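/*
 * The hc_rate fields above (rx and tx alike) convert a coalescing
 * interval into an events-per-second budget (sketch, assuming the tick
 * values are in usec): tx_ticks = 50 gives 1000000 / 50 = 20000, i.e.
 * at most 20000 status-block updates per second for that queue.
 */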
2465
8d96286a 2466static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2467{
2468 struct bnx2x_func_init_params func_init = {0};
2469 struct bnx2x_rss_params rss = {0};
2470 struct event_ring_data eq_data = { {0} };
2471 u16 flags;
2472
2473 /* pf specific setups */
2474 if (!CHIP_IS_E1(bp))
fb3bff17 2475 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2476
f2e0899f
DK
2477 if (CHIP_IS_E2(bp)) {
2478 /* reset IGU PF statistics: MSIX + ATTN */
2479 /* PF */
2480 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2481 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2482 (CHIP_MODE_IS_4_PORT(bp) ?
2483 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2484 /* ATTN */
2485 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2486 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2487 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2488 (CHIP_MODE_IS_4_PORT(bp) ?
2489 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2490 }
2491
523224a3
DK
2492 /* function setup flags */
2493 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2494
f2e0899f
DK
2495 if (CHIP_IS_E1x(bp))
2496 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2497 else
2498 flags |= FUNC_FLG_TPA;
523224a3 2499
030f3356
DK
2500 /* function setup */
2501
523224a3
DK
2502 /**
2503 * Although RSS is meaningless when there is a single HW queue we
2504 * still need it enabled in order to have HW Rx hash generated.
523224a3 2505 */
030f3356
DK
2506 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2507 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2508 rss.mode = bp->multi_mode;
2509 rss.result_mask = MULTI_MASK;
2510 func_init.rss = &rss;
523224a3
DK
2511
2512 func_init.func_flgs = flags;
2513 func_init.pf_id = BP_FUNC(bp);
2514 func_init.func_id = BP_FUNC(bp);
2515 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2516 func_init.spq_map = bp->spq_mapping;
2517 func_init.spq_prod = bp->spq_prod_idx;
2518
2519 bnx2x_func_init(bp, &func_init);
2520
2521 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2522
2523 /*
2524 Congestion management values depend on the link rate.
2525 There is no active link, so the initial link rate is set to 10 Gbps.
2526 When the link comes up, the congestion management values are
2527 re-calculated according to the actual link rate.
2528 */
2529 bp->link_vars.line_speed = SPEED_10000;
2530 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2531
2532 /* Only the PMF sets the HW */
2533 if (bp->port.pmf)
2534 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2535
2536 /* no rx until link is up */
2537 bp->rx_mode = BNX2X_RX_MODE_NONE;
2538 bnx2x_set_storm_rx_mode(bp);
2539
2540 /* init Event Queue */
2541 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2542 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2543 eq_data.producer = bp->eq_prod;
2544 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2545 eq_data.sb_id = DEF_SB_ID;
2546 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2547}
2548
2549
2550static void bnx2x_e1h_disable(struct bnx2x *bp)
2551{
2552 int port = BP_PORT(bp);
2553
2554 netif_tx_disable(bp->dev);
2555
2556 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2557
2558 netif_carrier_off(bp->dev);
2559}
2560
2561static void bnx2x_e1h_enable(struct bnx2x *bp)
2562{
2563 int port = BP_PORT(bp);
2564
2565 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2566
2567 /* Only the Tx queues need to be re-enabled */
2568 netif_tx_wake_all_queues(bp->dev);
2569
2570 /*
2571 * Should not call netif_carrier_on since it will be called if the link
2572 * is up when checking for link state
2573 */
2574}
2575
2576static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2577{
2578 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2579
2580 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2581
2582 /*
2583 * This is the only place besides the function initialization
2584 * where the bp->flags can change so it is done without any
2585 * locks
2586 */
f2e0899f 2587 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2588 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2589 bp->flags |= MF_FUNC_DIS;
2590
2591 bnx2x_e1h_disable(bp);
2592 } else {
2593 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2594 bp->flags &= ~MF_FUNC_DIS;
2595
2596 bnx2x_e1h_enable(bp);
2597 }
2598 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2599 }
2600 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2601
2602 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2603 bnx2x_link_sync_notify(bp);
2604 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2605 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2606 }
2607
2608 /* Report results to MCP */
2609 if (dcc_event)
2610 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2611 else
2612 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2613}
2614
2615/* must be called under the spq lock */
2616static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2617{
2618 struct eth_spe *next_spe = bp->spq_prod_bd;
2619
2620 if (bp->spq_prod_bd == bp->spq_last_bd) {
2621 bp->spq_prod_bd = bp->spq;
2622 bp->spq_prod_idx = 0;
2623 DP(NETIF_MSG_TIMER, "end of spq\n");
2624 } else {
2625 bp->spq_prod_bd++;
2626 bp->spq_prod_idx++;
2627 }
2628 return next_spe;
2629}
2630
2631/* must be called under the spq lock */
28912902
MC
2632static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2633{
2634 int func = BP_FUNC(bp);
2635
2636 /* Make sure that BD data is updated before writing the producer */
2637 wmb();
2638
523224a3 2639 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2640 bp->spq_prod_idx);
28912902
MC
2641 mmiowb();
2642}
2643
a2fbb9ea 2644/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2645int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2646 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2647{
28912902 2648 struct eth_spe *spe;
523224a3 2649 u16 type;
a2fbb9ea 2650
a2fbb9ea
ET
2651#ifdef BNX2X_STOP_ON_ERROR
2652 if (unlikely(bp->panic))
2653 return -EIO;
2654#endif
2655
34f80b04 2656 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2657
8fe23fbd 2658 if (!atomic_read(&bp->spq_left)) {
a2fbb9ea 2659 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2660 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2661 bnx2x_panic();
2662 return -EBUSY;
2663 }
f1410647 2664
28912902
MC
2665 spe = bnx2x_sp_get_next(bp);
2666
a2fbb9ea 2667 /* CID needs the port number to be encoded in it */
28912902 2668 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2669 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2670 HW_CID(bp, cid));
523224a3 2671
a2fbb9ea 2672 if (common)
523224a3
DK
2673 /* Common ramrods:
2674 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2675 * TRAFFIC_STOP, TRAFFIC_START
2676 */
2677 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2678 & SPE_HDR_CONN_TYPE;
2679 else
2680 /* ETH ramrods: SETUP, HALT */
2681 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2682 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2683
523224a3
DK
2684 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2685 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2686
523224a3
DK
2687 spe->hdr.type = cpu_to_le16(type);
2688
2689 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2690 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2691
2692 /* stats ramrod has its own slot on the spq */
2693 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2694 /* It's ok if the actual decrement is issued towards the memory
2695 * somewhere between the spin_lock and spin_unlock. Thus no
2696 * more explicit memory barrier is needed.
2697 */
8fe23fbd 2698 atomic_dec(&bp->spq_left);
a2fbb9ea 2699
cdaa7cb8 2700 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3
DK
2701 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2702 "type(0x%x) left %x\n",
cdaa7cb8
VZ
2703 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2704 (u32)(U64_LO(bp->spq_mapping) +
2705 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
8fe23fbd 2706 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
cdaa7cb8 2707
28912902 2708 bnx2x_sp_prod_update(bp);
34f80b04 2709 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2710 return 0;
2711}
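/*
 * Usage sketch (illustrative; the real statistics code builds its
 * ramrod elsewhere): posting the statistics ramrod, a "common" SPE
 * which, per the code above, does not consume an spq_left credit.
 */
static int example_post_stat_query(struct bnx2x *bp)
{
	dma_addr_t map = bnx2x_sp_mapping(bp, fw_stats);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
			     U64_HI(map), U64_LO(map), 1);
}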
2712
2713/* acquire split MCP access lock register */
4a37fb66 2714static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2715{
72fd0718 2716 u32 j, val;
34f80b04 2717 int rc = 0;
a2fbb9ea
ET
2718
2719 might_sleep();
72fd0718 2720 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2721 val = (1UL << 31);
2722 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2723 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2724 if (val & (1L << 31))
2725 break;
2726
2727 msleep(5);
2728 }
a2fbb9ea 2729 if (!(val & (1L << 31))) {
19680c48 2730 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2731 rc = -EBUSY;
2732 }
2733
2734 return rc;
2735}
2736
4a37fb66
YG
2737/* release split MCP access lock register */
2738static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2739{
72fd0718 2740 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2741}
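/*
 * One iteration of the acquire loop above, in isolation (a sketch of
 * the handshake as the driver uses it): bit 31 of GRCBASE_MCP + 0x9c
 * acts as the grant flag - write it, read it back, and treat a set bit
 * as ownership.
 */
static bool example_alr_try_once(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 1UL << 31);
	return REG_RD(bp, GRCBASE_MCP + 0x9c) & (1UL << 31);
}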
2742
523224a3
DK
2743#define BNX2X_DEF_SB_ATT_IDX 0x0001
2744#define BNX2X_DEF_SB_IDX 0x0002
2745
a2fbb9ea
ET
2746static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2747{
523224a3 2748 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2749 u16 rc = 0;
2750
2751 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2752 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2753 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2754 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2755 }
523224a3
DK
2756
2757 if (bp->def_idx != def_sb->sp_sb.running_index) {
2758 bp->def_idx = def_sb->sp_sb.running_index;
2759 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2760 }
523224a3
DK
2761
2762 /* Do not reorder: indices reading should complete before handling */
2763 barrier();
a2fbb9ea
ET
2764 return rc;
2765}
2766
2767/*
2768 * slow path service functions
2769 */
2770
2771static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2772{
34f80b04 2773 int port = BP_PORT(bp);
a2fbb9ea
ET
2774 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2775 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2776 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2777 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2778 u32 aeu_mask;
87942b46 2779 u32 nig_mask = 0;
f2e0899f 2780 u32 reg_addr;
a2fbb9ea 2781
a2fbb9ea
ET
2782 if (bp->attn_state & asserted)
2783 BNX2X_ERR("IGU ERROR\n");
2784
3fcaf2e5
EG
2785 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2786 aeu_mask = REG_RD(bp, aeu_addr);
2787
a2fbb9ea 2788 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2789 aeu_mask, asserted);
72fd0718 2790 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2791 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2792
3fcaf2e5
EG
2793 REG_WR(bp, aeu_addr, aeu_mask);
2794 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2795
3fcaf2e5 2796 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2797 bp->attn_state |= asserted;
3fcaf2e5 2798 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2799
2800 if (asserted & ATTN_HARD_WIRED_MASK) {
2801 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2802
a5e9a7cf
EG
2803 bnx2x_acquire_phy_lock(bp);
2804
877e9aa4 2805 /* save nig interrupt mask */
87942b46 2806 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2807 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2808
c18487ee 2809 bnx2x_link_attn(bp);
a2fbb9ea
ET
2810
2811 /* handle unicore attn? */
2812 }
2813 if (asserted & ATTN_SW_TIMER_4_FUNC)
2814 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2815
2816 if (asserted & GPIO_2_FUNC)
2817 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2818
2819 if (asserted & GPIO_3_FUNC)
2820 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2821
2822 if (asserted & GPIO_4_FUNC)
2823 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2824
2825 if (port == 0) {
2826 if (asserted & ATTN_GENERAL_ATTN_1) {
2827 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2828 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2829 }
2830 if (asserted & ATTN_GENERAL_ATTN_2) {
2831 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2832 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2833 }
2834 if (asserted & ATTN_GENERAL_ATTN_3) {
2835 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2836 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2837 }
2838 } else {
2839 if (asserted & ATTN_GENERAL_ATTN_4) {
2840 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2841 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2842 }
2843 if (asserted & ATTN_GENERAL_ATTN_5) {
2844 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2845 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2846 }
2847 if (asserted & ATTN_GENERAL_ATTN_6) {
2848 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2849 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2850 }
2851 }
2852
2853 } /* if hardwired */
2854
f2e0899f
DK
2855 if (bp->common.int_block == INT_BLOCK_HC)
2856 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2857 COMMAND_REG_ATTN_BITS_SET);
2858 else
2859 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2860
2861 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2862 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2863 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2864
2865 /* now set back the mask */
a5e9a7cf 2866 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2867 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2868 bnx2x_release_phy_lock(bp);
2869 }
a2fbb9ea
ET
2870}
2871
fd4ef40d
EG
2872static inline void bnx2x_fan_failure(struct bnx2x *bp)
2873{
2874 int port = BP_PORT(bp);
b7737c9b 2875 u32 ext_phy_config;
fd4ef40d 2876 /* mark the failure */
b7737c9b
YR
2877 ext_phy_config =
2878 SHMEM_RD(bp,
2879 dev_info.port_hw_config[port].external_phy_config);
2880
2881 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2882 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2883 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2884 ext_phy_config);
fd4ef40d
EG
2885
2886 /* log the failure */
cdaa7cb8
VZ
2887 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2888 " the driver to shutdown the card to prevent permanent"
2889 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2890}
ab6ad5a4 2891
877e9aa4 2892static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2893{
34f80b04 2894 int port = BP_PORT(bp);
877e9aa4 2895 int reg_offset;
d90d96ba 2896 u32 val;
877e9aa4 2897
34f80b04
EG
2898 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2899 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2900
34f80b04 2901 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2902
2903 val = REG_RD(bp, reg_offset);
2904 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2905 REG_WR(bp, reg_offset, val);
2906
2907 BNX2X_ERR("SPIO5 hw attention\n");
2908
fd4ef40d 2909 /* Fan failure attention */
d90d96ba 2910 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 2911 bnx2x_fan_failure(bp);
877e9aa4 2912 }
34f80b04 2913
589abe3a
EG
2914 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2915 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2916 bnx2x_acquire_phy_lock(bp);
2917 bnx2x_handle_module_detect_int(&bp->link_params);
2918 bnx2x_release_phy_lock(bp);
2919 }
2920
34f80b04
EG
2921 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2922
2923 val = REG_RD(bp, reg_offset);
2924 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2925 REG_WR(bp, reg_offset, val);
2926
2927 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2928 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2929 bnx2x_panic();
2930 }
877e9aa4
ET
2931}
2932
2933static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2934{
2935 u32 val;
2936
0626b899 2937 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2938
2939 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2940 BNX2X_ERR("DB hw attention 0x%x\n", val);
2941 /* DORQ discard attention */
2942 if (val & 0x2)
2943 BNX2X_ERR("FATAL error from DORQ\n");
2944 }
34f80b04
EG
2945
2946 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2947
2948 int port = BP_PORT(bp);
2949 int reg_offset;
2950
2951 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2952 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2953
2954 val = REG_RD(bp, reg_offset);
2955 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2956 REG_WR(bp, reg_offset, val);
2957
2958 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2959 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2960 bnx2x_panic();
2961 }
877e9aa4
ET
2962}
2963
2964static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2965{
2966 u32 val;
2967
2968 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2969
2970 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2971 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2972 /* CFC error attention */
2973 if (val & 0x2)
2974 BNX2X_ERR("FATAL error from CFC\n");
2975 }
2976
2977 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2978
2979 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2980 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2981 /* RQ_USDMDP_FIFO_OVERFLOW */
2982 if (val & 0x18000)
2983 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
2984 if (CHIP_IS_E2(bp)) {
2985 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2986 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2987 }
877e9aa4 2988 }
34f80b04
EG
2989
2990 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2991
2992 int port = BP_PORT(bp);
2993 int reg_offset;
2994
2995 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2996 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2997
2998 val = REG_RD(bp, reg_offset);
2999 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3000 REG_WR(bp, reg_offset, val);
3001
3002 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3003 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3004 bnx2x_panic();
3005 }
877e9aa4
ET
3006}
3007
3008static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3009{
34f80b04
EG
3010 u32 val;
3011
877e9aa4
ET
3012 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3013
34f80b04
EG
3014 if (attn & BNX2X_PMF_LINK_ASSERT) {
3015 int func = BP_FUNC(bp);
3016
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3018 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3019 func_mf_config[BP_ABS_FUNC(bp)].config);
3020 val = SHMEM_RD(bp,
3021 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3022 if (val & DRV_STATUS_DCC_EVENT_MASK)
3023 bnx2x_dcc_event(bp,
3024 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3025 bnx2x__link_status_update(bp);
2691d51d 3026 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3027 bnx2x_pmf_update(bp);
3028
3029 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3030
3031 BNX2X_ERR("MC assert!\n");
3032 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3033 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3034 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3035 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3036 bnx2x_panic();
3037
3038 } else if (attn & BNX2X_MCP_ASSERT) {
3039
3040 BNX2X_ERR("MCP assert!\n");
3041 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3042 bnx2x_fw_dump(bp);
877e9aa4
ET
3043
3044 } else
3045 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3046 }
3047
3048 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3049 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3050 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3051 val = CHIP_IS_E1(bp) ? 0 :
3052 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3053 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3054 }
3055 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3056 val = CHIP_IS_E1(bp) ? 0 :
3057 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3058 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3059 }
877e9aa4 3060 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3061 }
3062}
3063
72fd0718
VZ
3064#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3065#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3066#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3067#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3068#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3069#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
f85582f8 3070
72fd0718
VZ
3071/*
3072 * should be run under rtnl lock
3073 */
3074static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3075{
3076 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3077 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3078 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3079 barrier();
3080 mmiowb();
3081}
3082
3083/*
3084 * should be run under rtnl lock
3085 */
3086static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3087{
3088 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3089 val |= (1 << RESET_DONE_FLAG_SHIFT);
3090 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3091 barrier();
3092 mmiowb();
3093}
3094
3095/*
3096 * should be run under rtnl lock
3097 */
9f6c9258 3098bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3099{
3100 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3101 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3102 return !(val & RESET_DONE_FLAG_MASK);
3103}
3104
3105/*
3106 * should be run under rtnl lock
3107 */
9f6c9258 3108inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3109{
3110 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3111
3112 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3113
3114 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3115 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3116 barrier();
3117 mmiowb();
3118}
3119
3120/*
3121 * should be run under rtnl lock
3122 */
9f6c9258 3123u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3124{
3125 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3126
3127 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3128
3129 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3130 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3131 barrier();
3132 mmiowb();
3133
3134 return val1;
3135}
3136
3137/*
3138 * should be run under rtnl lock
3139 */
3140static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3141{
3142 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3143}
3144
3145static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3146{
3147 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3148 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3149}
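/*
 * Layout of BNX2X_MISC_GEN_REG as used by the helpers above (sketch):
 *
 *   bits 0..15  load counter  - how many functions have loaded
 *   bit  16     reset flag    - set while recovery is in progress,
 *                               cleared by bnx2x_set_reset_done()
 *
 * e.g. a raw value of 0x00010002 reads as "reset in progress, two
 * functions loaded".
 */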
3150
3151static inline void _print_next_block(int idx, const char *blk)
3152{
3153 if (idx)
3154 pr_cont(", ");
3155 pr_cont("%s", blk);
3156}
3157
3158static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3159{
3160 int i = 0;
3161 u32 cur_bit = 0;
3162 for (i = 0; sig; i++) {
3163 cur_bit = ((u32)0x1 << i);
3164 if (sig & cur_bit) {
3165 switch (cur_bit) {
3166 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3167 _print_next_block(par_num++, "BRB");
3168 break;
3169 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3170 _print_next_block(par_num++, "PARSER");
3171 break;
3172 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3173 _print_next_block(par_num++, "TSDM");
3174 break;
3175 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3176 _print_next_block(par_num++, "SEARCHER");
3177 break;
3178 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3179 _print_next_block(par_num++, "TSEMI");
3180 break;
3181 }
3182
3183 /* Clear the bit */
3184 sig &= ~cur_bit;
3185 }
3186 }
3187
3188 return par_num;
3189}
3190
3191static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3192{
3193 int i = 0;
3194 u32 cur_bit = 0;
3195 for (i = 0; sig; i++) {
3196 cur_bit = ((u32)0x1 << i);
3197 if (sig & cur_bit) {
3198 switch (cur_bit) {
3199 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3200 _print_next_block(par_num++, "PBCLIENT");
3201 break;
3202 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3203 _print_next_block(par_num++, "QM");
3204 break;
3205 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3206 _print_next_block(par_num++, "XSDM");
3207 break;
3208 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3209 _print_next_block(par_num++, "XSEMI");
3210 break;
3211 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3212 _print_next_block(par_num++, "DOORBELLQ");
3213 break;
3214 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3215 _print_next_block(par_num++, "VAUX PCI CORE");
3216 break;
3217 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3218 _print_next_block(par_num++, "DEBUG");
3219 break;
3220 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3221 _print_next_block(par_num++, "USDM");
3222 break;
3223 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3224 _print_next_block(par_num++, "USEMI");
3225 break;
3226 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3227 _print_next_block(par_num++, "UPB");
3228 break;
3229 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3230 _print_next_block(par_num++, "CSDM");
3231 break;
3232 }
3233
3234 /* Clear the bit */
3235 sig &= ~cur_bit;
3236 }
3237 }
3238
3239 return par_num;
3240}
3241
3242static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3243{
3244 int i = 0;
3245 u32 cur_bit = 0;
3246 for (i = 0; sig; i++) {
3247 cur_bit = ((u32)0x1 << i);
3248 if (sig & cur_bit) {
3249 switch (cur_bit) {
3250 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3251 _print_next_block(par_num++, "CSEMI");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3254 _print_next_block(par_num++, "PXP");
3255 break;
3256 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3257 _print_next_block(par_num++,
3258 "PXPPCICLOCKCLIENT");
3259 break;
3260 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3261 _print_next_block(par_num++, "CFC");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3264 _print_next_block(par_num++, "CDU");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3267 _print_next_block(par_num++, "IGU");
3268 break;
3269 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3270 _print_next_block(par_num++, "MISC");
3271 break;
3272 }
3273
3274 /* Clear the bit */
3275 sig &= ~cur_bit;
3276 }
3277 }
3278
3279 return par_num;
3280}
3281
3282static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3283{
3284 int i = 0;
3285 u32 cur_bit = 0;
3286 for (i = 0; sig; i++) {
3287 cur_bit = ((u32)0x1 << i);
3288 if (sig & cur_bit) {
3289 switch (cur_bit) {
3290 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3291 _print_next_block(par_num++, "MCP ROM");
3292 break;
3293 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3294 _print_next_block(par_num++, "MCP UMP RX");
3295 break;
3296 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3297 _print_next_block(par_num++, "MCP UMP TX");
3298 break;
3299 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3300 _print_next_block(par_num++, "MCP SCPAD");
3301 break;
3302 }
3303
3304 /* Clear the bit */
3305 sig &= ~cur_bit;
3306 }
3307 }
3308
3309 return par_num;
3310}
3311
3312static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3313 u32 sig2, u32 sig3)
3314{
3315 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3316 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3317 int par_num = 0;
3318 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3319 "[0]:0x%08x [1]:0x%08x "
3320 "[2]:0x%08x [3]:0x%08x\n",
3321 sig0 & HW_PRTY_ASSERT_SET_0,
3322 sig1 & HW_PRTY_ASSERT_SET_1,
3323 sig2 & HW_PRTY_ASSERT_SET_2,
3324 sig3 & HW_PRTY_ASSERT_SET_3);
3325 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3326 bp->dev->name);
3327 par_num = bnx2x_print_blocks_with_parity0(
3328 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3329 par_num = bnx2x_print_blocks_with_parity1(
3330 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3331 par_num = bnx2x_print_blocks_with_parity2(
3332 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3333 par_num = bnx2x_print_blocks_with_parity3(
3334 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3335 printk("\n");
3336 return true;
3337 } else
3338 return false;
3339}
3340
9f6c9258 3341bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3342{
a2fbb9ea 3343 struct attn_route attn;
72fd0718
VZ
3344 int port = BP_PORT(bp);
3345
3346 attn.sig[0] = REG_RD(bp,
3347 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3348 port*4);
3349 attn.sig[1] = REG_RD(bp,
3350 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3351 port*4);
3352 attn.sig[2] = REG_RD(bp,
3353 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3354 port*4);
3355 attn.sig[3] = REG_RD(bp,
3356 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3357 port*4);
3358
3359 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3360 attn.sig[3]);
3361}

static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		    (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
				  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * the other function will also "see" the parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
				attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
				attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
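
/*
 * Worked example (illustrative): with attn_bits = 0b0110, attn_ack = 0b0100
 * and attn_state = 0b0100, then
 *   asserted   =  0b0110 & ~0b0100 & ~0b0100 = 0b0010 (newly raised line)
 *   deasserted = ~0b0110 &  0b0100 &  0b0100 = 0b0000 (nothing cleared)
 * so only the newly raised attention is forwarded for handling, and the
 * "BAD attention state" check above also passes for these values.
 */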

static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}

#ifdef BCM_CNIC
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif

static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page element we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u  bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;

		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
#endif
			bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;
		}

		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
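
/*
 * Wraparound sketch (illustrative, assuming EQ_DESC_MAX_PAGE marks the
 * next-page element at the end of each page): a hw_cons sitting exactly on
 * the next-page element is bumped by one above, while NEXT_EQ_IDX() steps
 * sw_cons/sw_prod across that same element, so the "sw_cons != hw_cons"
 * loop condition still terminates correctly at page boundaries.
 */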

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {

		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
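
/*
 * Heartbeat example (illustrative; assumes MCP_PULSE_SEQ_MASK is the
 * sequence-number mask, e.g. a 15-bit 0x7fff): drv_pulse 0x0003 passes
 * with mcp_pulse 0x0003 (MCP already answered) or 0x0002 (answer still
 * pending); the wrap case drv_pulse 0x0000 vs mcp_pulse 0x7fff also
 * passes because (0x7fff + 1) & 0x7fff == 0x0000.
 */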

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;
	if (!(len%4) && !(addr%4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);
}
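
/*
 * Usage sketch (illustrative): bnx2x_fill(bp, addr, 0, 64) on a 4-byte
 * aligned addr issues 16 dword-wide REG_WR() writes; an unaligned addr
 * or length instead falls back to 64 byte-wide REG_WR8() writes.
 */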

/* helper: writes FP SP data to FW - data_size in dwords */
static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				       int fw_sb_id,
				       u32 *sb_data_p,
				       u32 data_size)
{
	int index;
	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			sizeof(u32)*index,
			*(sb_data_p + index));
}

static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}

/* helper: writes SP SB data to FW */
static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
		struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32),
			*((u32 *)sp_sb_data + i));
}

static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
			CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
			CSTORM_SP_SYNC_BLOCK_SIZE);
}

static inline
void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
					   int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}
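
/*
 * Reading note (an assumption, not taken from the spec): the 0xFF and
 * 0xFFFFFFFF values above appear to park the state machine's timer at
 * "never expires" until real coalescing parameters are programmed via
 * bnx2x_update_coalesce_sb() below.
 */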

static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm  *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
					   u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
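
/*
 * Example (illustrative; assumes BNX2X_BTR is the timer resolution in
 * usec, e.g. 4): usec = 100 programs ticks = 25, while usec = 0 forces
 * the disable flag so the index never triggers an interrupt on its own.
 */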

static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
				       false, tx_usec);
}

static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
	sp_sb_data.igu_seg_id		= igu_seg_id;
	sp_sb_data.p_func.pf_id		= func;
	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
	sp_sb_data.p_func.vf_id		= 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}
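
/*
 * Ring layout example (illustrative): with NUM_EQ_PAGES == 2, the last
 * element of page 0 (i == 1) points at page 1 (1 % 2 == 1) and the last
 * element of page 1 (i == 2) wraps back to page 0 (2 % 2 == 0), chaining
 * the pages into a circular event ring.
 */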

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
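
/*
 * Example (illustrative values): with num_queues == 4 and a base cl_id
 * of 17, the table entries cycle 17, 18, 19, 20, 17, ... so RSS hash
 * buckets are spread round-robin across the RX queues' client ids.
 */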

void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	u16 cl_id;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		break;

	case BNX2X_RX_MODE_NORMAL:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_MULTICAST);
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		break;

	case BNX2X_RX_MODE_PROMISC:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);

		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			     NIG_REG_LLH0_BRB1_DRV_MASK,
	       llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all
	);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent mode we write 0 here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
			   "cl_id %d  fw_sb %d  igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}

void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
			    " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);
	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
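
/*
 * Header-skipping note (per RFC 1952): a gzip member starts with the
 * magic bytes 0x1f 0x8b followed by the compression method (0x08,
 * Z_DEFLATED); n starts past the 10-byte fixed header, and when the
 * FNAME flag (bit 3 of the flags byte) is set the while-loop above also
 * skips the NUL-terminated original file name before handing the raw
 * deflate stream to zlib with a negative window-bits value.
 */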

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK,		0x3ffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0,	0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1,	0x7f},
	{HC_REG_HC_PRTY_MASK,		0x7},
	{MISC_REG_MISC_PRTY_MASK,	0x1},
	{QM_REG_QM_PRTY_MASK,		0x0},
	{DORQ_REG_DORQ_PRTY_MASK,	0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK,		0x4}, /* bit 2 */
	{CDU_REG_CDU_PRTY_MASK,		0x0},
	{CFC_REG_CFC_PRTY_MASK,		0x0},
	{DBG_REG_DBG_PRTY_MASK,		0x0},
	{DMAE_REG_DMAE_PRTY_MASK,	0x0},
	{BRB1_REG_BRB1_PRTY_MASK,	0x0},
	{PRS_REG_PRS_PRTY_MASK,		(1<<6)}, /* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK,	0x18}, /* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK,	0x8}, /* bit 3 */
	{USDM_REG_USDM_PRTY_MASK,	0x38}, /* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK,	0x8}, /* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0,	0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1,	0x0},
	{USEM_REG_USEM_PRTY_MASK_0,	0x0},
	{USEM_REG_USEM_PRTY_MASK_1,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1,	0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
			   bnx2x_parity_mask[i].mask);
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
	u32 offset = 0;

	if (CHIP_IS_E1(bp))
		return;
	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
		return;

	switch (BP_ABS_FUNC(bp)) {
	case 0:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
		break;
	case 1:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
		break;
	case 2:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
		break;
	case 3:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
		break;
	case 4:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
		break;
	case 5:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
		break;
	case 6:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
		break;
	case 7:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
		break;
	default:
		return;
	}

	REG_WR(bp, offset, pretend_func_num);
	REG_RD(bp, offset);
	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}
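
/*
 * Usage note: callers pretend, touch the other function's registers, then
 * restore their own identity, e.g. in bnx2x_init_hw_common() below:
 *	bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
 *	bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 * The REG_RD() back of the same offset above presumably flushes the
 * posted write before any pretended access is issued (an assumption).
 */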

static void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		u8 fid;

		/**
		 * In 4-port mode or 2-port mode we need to turn off
		 * master-enable for everyone; after that, turn it back on
		 * for self. So, we disregard multi-function or not, and
		 * always disable for all functions on the given path, this
		 * means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
		 */
		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
			if (fid == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, fid);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (CHIP_IS_E2(bp)) {
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its sibling are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (CHIP_IS_E2(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);

		bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5113
34f80b04
EG
5114 if (sizeof(union cdu_context) != 1024)
5115 /* we currently assume that a context is 1024 bytes */
cdaa7cb8
VZ
5116 dev_alert(&bp->pdev->dev, "please adjust the size "
5117 "of cdu_context(%ld)\n",
7995c64e 5118 (long)sizeof(union cdu_context));
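 /* If the 1024-byte layout is a hard requirement rather than a tunable,
  * a compile-time check would catch a drift earlier than the runtime
  * alert above; a minimal sketch using the kernel's BUILD_BUG_ON():
  *
  *	BUILD_BUG_ON(sizeof(union cdu_context) != 1024);
  */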
a2fbb9ea 5119
94a78b79 5120 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
5121 val = (4 << 24) + (0 << 12) + 1024;
5122 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 5123
94a78b79 5124 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 5125 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5126 /* enable context validation interrupt from CFC */
5127 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5128
5129 /* set the thresholds to prevent CFC/CDU race */
5130 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5131
94a78b79 5132 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
f2e0899f
DK
5133
5134 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5135 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5136
5137 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
94a78b79 5138 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 5139
94a78b79 5140 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
5141 /* Reset PCIE errors for debug */
5142 REG_WR(bp, 0x2814, 0xffffffff);
5143 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5144
f2e0899f
DK
5145 if (CHIP_IS_E2(bp)) {
5146 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5147 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5148 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5149 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5150 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5151 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5152 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5153 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5154 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5155 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5156 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5157 }
5158
94a78b79 5159 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 5160 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 5161 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 5162 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 5163
94a78b79 5164 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
f2e0899f 5165 if (!CHIP_IS_E1(bp)) {
fb3bff17
DK
5166 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5167 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
34f80b04 5168 }
f2e0899f
DK
5169 if (CHIP_IS_E2(bp)) {
5170 /* Bit-map indicating which L2 hdrs may appear after the
5171 basic Ethernet header */
5172 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5173 }
34f80b04
EG
5174
5175 if (CHIP_REV_IS_SLOW(bp))
5176 msleep(200);
5177
5178 /* finish CFC init */
5179 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5180 if (val != 1) {
5181 BNX2X_ERR("CFC LL_INIT failed\n");
5182 return -EBUSY;
5183 }
5184 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5185 if (val != 1) {
5186 BNX2X_ERR("CFC AC_INIT failed\n");
5187 return -EBUSY;
5188 }
5189 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5190 if (val != 1) {
5191 BNX2X_ERR("CFC CAM_INIT failed\n");
5192 return -EBUSY;
5193 }
5194 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5195
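 /* reg_poll() is defined elsewhere in this file; judging only from the
  * call sites above, its shape is presumably close to this sketch
  * (re-read every 'wait' ms until 'expected' shows up or the 'ms'
  * budget runs out, returning the last value read):
  *
  *	static int reg_poll(struct bnx2x *bp, u32 reg, u32 expected,
  *			    int ms, int wait)
  *	{
  *		u32 val;
  *
  *		do {
  *			val = REG_RD(bp, reg);
  *			if (val == expected)
  *				break;
  *			ms -= wait;
  *			msleep(wait);
  *		} while (ms > 0);
  *
  *		return val;
  *	}
  */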
f2e0899f
DK
5196 if (CHIP_IS_E1(bp)) {
5197 /* read NIG statistic
5198 to see whether this is the first load since power-up */
5199 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5200 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 5201
f2e0899f
DK
5202 /* do internal memory self test */
5203 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5204 BNX2X_ERR("internal mem self test failed\n");
5205 return -EBUSY;
5206 }
34f80b04
EG
5207 }
5208
d90d96ba 5209 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5210 bp->common.shmem_base,
5211 bp->common.shmem2_base);
f1410647 5212
fd4ef40d
EG
5213 bnx2x_setup_fan_failure_detection(bp);
5214
34f80b04
EG
5215 /* clear PXP2 attentions */
5216 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5217
34f80b04 5218 enable_blocks_attention(bp);
72fd0718
VZ
5219 if (CHIP_PARITY_SUPPORTED(bp))
5220 enable_blocks_parity(bp);
a2fbb9ea 5221
6bbca910 5222 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5223 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5224 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5225 CHIP_IS_E1x(bp)) {
5226 u32 shmem_base[2], shmem2_base[2];
5227 shmem_base[0] = bp->common.shmem_base;
5228 shmem2_base[0] = bp->common.shmem2_base;
5229 if (CHIP_IS_E2(bp)) {
5230 shmem_base[1] =
5231 SHMEM2_RD(bp, other_shmem_base_addr);
5232 shmem2_base[1] =
5233 SHMEM2_RD(bp, other_shmem2_base_addr);
5234 }
5235 bnx2x_acquire_phy_lock(bp);
5236 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5237 bp->common.chip_id);
5238 bnx2x_release_phy_lock(bp);
5239 }
6bbca910
YR
5240 } else
5241 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5242
34f80b04
EG
5243 return 0;
5244}
a2fbb9ea 5245
523224a3 5246static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
5247{
5248 int port = BP_PORT(bp);
94a78b79 5249 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 5250 u32 low, high;
34f80b04 5251 u32 val;
a2fbb9ea 5252
cdaa7cb8 5253 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
34f80b04
EG
5254
5255 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5256
94a78b79 5257 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 5258 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c 5259
f2e0899f
DK
5260 /* Timers bug workaround: the pf_master bit in pglue is disabled at the
5261 * common phase, so we need to enable it here before any dmae accesses
5262 * are attempted. Therefore the enable-master is added manually to the
5263 * port phase (it also happens in the function phase)
5264 */
5265 if (CHIP_IS_E2(bp))
5266 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5267
ca00392c
EG
5268 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5269 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5270 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 5271 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 5272
523224a3
DK
5273 /* QM cid (connection) count */
5274 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 5275
523224a3 5276#ifdef BCM_CNIC
94a78b79 5277 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
5278 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5279 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 5280#endif
cdaa7cb8 5281
94a78b79 5282 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 5283
f2e0899f
DK
5284 if (CHIP_MODE_IS_4_PORT(bp))
5285 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5286
5287 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5288 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5289 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5290 /* no pause for emulation and FPGA */
5291 low = 0;
5292 high = 513;
5293 } else {
5294 if (IS_MF(bp))
5295 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5296 else if (bp->dev->mtu > 4096) {
5297 if (bp->flags & ONE_PORT_FLAG)
5298 low = 160;
5299 else {
5300 val = bp->dev->mtu;
5301 /* (24*1024 + val*4)/256 */
5302 low = 96 + (val/64) +
5303 ((val % 64) ? 1 : 0);
5304 }
5305 } else
5306 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5307 high = low + 56; /* 14*1024/256 */
5308 }
5309 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5310 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 5311 }
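 /* Worked example for the MTU-based branch above: mtu = 9000 gives
  * low = 96 + 140 + 1 = 237 (9000/64 = 140 remainder 40, hence the
  * rounding term) and high = 237 + 56 = 293, matching the intended
  * (24*1024 + 9000*4)/256 = 236.6 rounded up.
  */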
1c06328c 5312
f2e0899f
DK
5313 if (CHIP_MODE_IS_4_PORT(bp)) {
5314 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5315 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5316 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5317 BRB1_REG_MAC_GUARANTIED_0), 40);
5318 }
1c06328c 5319
94a78b79 5320 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 5321
94a78b79 5322 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 5323 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 5324 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 5325 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 5326
94a78b79
VZ
5327 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5328 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5329 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5330 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
f2e0899f
DK
5331 if (CHIP_MODE_IS_4_PORT(bp))
5332 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
356e2385 5333
94a78b79 5334 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 5335 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 5336
94a78b79 5337 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea 5338
f2e0899f
DK
5339 if (!CHIP_IS_E2(bp)) {
5340 /* configure PBF to work without PAUSE mtu 9000 */
5341 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5342
f2e0899f
DK
5343 /* update threshold */
5344 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5345 /* update init credit */
5346 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 5347
f2e0899f
DK
5348 /* probe changes */
5349 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5350 udelay(50);
5351 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5352 }
a2fbb9ea 5353
37b091ba
MC
5354#ifdef BCM_CNIC
5355 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 5356#endif
94a78b79 5357 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 5358 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
5359
5360 if (CHIP_IS_E1(bp)) {
5361 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5362 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5363 }
94a78b79 5364 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 5365
f2e0899f
DK
5366 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5367
94a78b79 5368 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
5369 /* init aeu_mask_attn_func_0/1:
5370 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5371 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5372 * bits 4-7 are used for "per vn group attention" */
5373 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
fb3bff17 5374 (IS_MF(bp) ? 0xF7 : 0x7));
34f80b04 5375
94a78b79 5376 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 5377 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 5378 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 5379 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 5380 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 5381
94a78b79 5382 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
5383
5384 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5385
f2e0899f 5386 if (!CHIP_IS_E1(bp)) {
fb3bff17 5387 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 5388 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
fb3bff17 5389 (IS_MF(bp) ? 0x1 : 0x2));
34f80b04 5390
f2e0899f
DK
5391 if (CHIP_IS_E2(bp)) {
5392 val = 0;
5393 switch (bp->mf_mode) {
5394 case MULTI_FUNCTION_SD:
5395 val = 1;
5396 break;
5397 case MULTI_FUNCTION_SI:
5398 val = 2;
5399 break;
5400 }
5401
5402 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5403 NIG_REG_LLH0_CLS_TYPE), val);
5404 }
1c06328c
EG
5405 {
5406 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5407 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5408 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5409 }
34f80b04
EG
5410 }
5411
94a78b79 5412 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 5413 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
d90d96ba 5414 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
a22f0788
YR
5415 bp->common.shmem_base,
5416 bp->common.shmem2_base);
d90d96ba 5417 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
a22f0788 5418 bp->common.shmem2_base, port)) {
4d295db0
EG
5419 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5420 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5421 val = REG_RD(bp, reg_addr);
f1410647 5422 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 5423 REG_WR(bp, reg_addr, val);
f1410647 5424 }
c18487ee 5425 bnx2x__link_reset(bp);
a2fbb9ea 5426
34f80b04
EG
5427 return 0;
5428}
5429
34f80b04
EG
5430static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5431{
5432 int reg;
5433
f2e0899f 5434 if (CHIP_IS_E1(bp))
34f80b04 5435 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
5436 else
5437 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04
EG
5438
5439 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5440}
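/* The ILT entry is wider than one register, hence the wide-bus write:
 * ONCHIP_ADDR1()/ONCHIP_ADDR2() presumably split the 64-bit DMA address
 * into the two 32-bit halves that the PXP2 on-chip address table expects.
 */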
5441
f2e0899f
DK
5442static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5443{
5444 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5445}
5446
5447static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5448{
5449 u32 i, base = FUNC_ILT_BASE(func);
5450 for (i = base; i < base + ILT_PER_FUNC; i++)
5451 bnx2x_ilt_wr(bp, i, 0);
5452}
5453
523224a3 5454static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
5455{
5456 int port = BP_PORT(bp);
5457 int func = BP_FUNC(bp);
523224a3
DK
5458 struct bnx2x_ilt *ilt = BP_ILT(bp);
5459 u16 cdu_ilt_start;
8badd27a 5460 u32 addr, val;
f4a66897
VZ
5461 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5462 int i, main_mem_width;
34f80b04 5463
cdaa7cb8 5464 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
34f80b04 5465
8badd27a 5466 /* set MSI reconfigure capability */
f2e0899f
DK
5467 if (bp->common.int_block == INT_BLOCK_HC) {
5468 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5469 val = REG_RD(bp, addr);
5470 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5471 REG_WR(bp, addr, val);
5472 }
8badd27a 5473
523224a3
DK
5474 ilt = BP_ILT(bp);
5475 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 5476
523224a3
DK
5477 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5478 ilt->lines[cdu_ilt_start + i].page =
5479 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5480 ilt->lines[cdu_ilt_start + i].page_mapping =
5481 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5482 /* cdu ilt pages are allocated manually so there's no need to
5483 set the size */
37b091ba 5484 }
523224a3 5485 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 5486
523224a3
DK
5487#ifdef BCM_CNIC
5488 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
37b091ba 5489
523224a3
DK
5490 /* T1 hash bits value determines the T1 number of entries */
5491 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5492#endif
37b091ba 5493
523224a3
DK
5494#ifndef BCM_CNIC
5495 /* set NIC mode */
5496 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5497#endif /* BCM_CNIC */
37b091ba 5498
f2e0899f
DK
5499 if (CHIP_IS_E2(bp)) {
5500 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5501
5502 /* Turn on a single ISR mode in IGU if driver is going to use
5503 * INT#x or MSI
5504 */
5505 if (!(bp->flags & USING_MSIX_FLAG))
5506 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5507 /*
5508 * Timers bug workaround: function init part.
5509 * We need to wait 20 msec after initializing the ILT
5510 * to make sure there are no requests in
5511 * one of the PXP internal queues with "old" ILT addresses
5512 */
5513 msleep(20);
5514 /*
5515 * Master enable - needed because WB DMAE writes are performed
5516 * before this register is re-initialized as part of the regular
5517 * function init
5518 */
5519 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5520 /* Enable the function in IGU */
5521 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5522 }
5523
523224a3 5524 bp->dmae_ready = 1;
34f80b04 5525
523224a3
DK
5526 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5527
f2e0899f
DK
5528 if (CHIP_IS_E2(bp))
5529 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5530
523224a3
DK
5531 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5532 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5533 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5534 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5535 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5536 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5537 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5538 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5539 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5540
f2e0899f
DK
5541 if (CHIP_IS_E2(bp)) {
5542 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5543 BP_PATH(bp));
5544 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5545 BP_PATH(bp));
5546 }
5547
5548 if (CHIP_MODE_IS_4_PORT(bp))
5549 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5550
5551 if (CHIP_IS_E2(bp))
5552 REG_WR(bp, QM_REG_PF_EN, 1);
5553
523224a3 5554 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5555
5556 if (CHIP_MODE_IS_4_PORT(bp))
5557 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5558
523224a3
DK
5559 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5560 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5561 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5562 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5563 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5564 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5565 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5566 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5567 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5568 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5569 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
f2e0899f
DK
5570 if (CHIP_IS_E2(bp))
5571 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5572
523224a3
DK
5573 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5574
5575 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
34f80b04 5576
f2e0899f
DK
5577 if (CHIP_IS_E2(bp))
5578 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5579
fb3bff17 5580 if (IS_MF(bp)) {
34f80b04 5581 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 5582 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
5583 }
5584
523224a3
DK
5585 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5586
34f80b04 5587 /* HC init per function */
f2e0899f
DK
5588 if (bp->common.int_block == INT_BLOCK_HC) {
5589 if (CHIP_IS_E1H(bp)) {
5590 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5591
5592 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5593 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5594 }
5595 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5596
5597 } else {
5598 int num_segs, sb_idx, prod_offset;
5599
34f80b04
EG
5600 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5601
f2e0899f
DK
5602 if (CHIP_IS_E2(bp)) {
5603 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5604 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5605 }
5606
5607 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5608
5609 if (CHIP_IS_E2(bp)) {
5610 int dsb_idx = 0;
5611 /**
5612 * Producer memory:
5613 * E2 mode: address 0-135 match to the mapping memory;
5614 * 136 - PF0 default prod; 137 - PF1 default prod;
5615 * 138 - PF2 default prod; 139 - PF3 default prod;
5616 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5617 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5618 * 144-147 reserved.
5619 *
5620 * E1.5 mode - in backward-compatible mode:
5621 * for a non-default SB, each even line in the memory
5622 * holds the U producer and each odd line holds
5623 * the C producer. The first 128 producers are for
5624 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5625 * producers are for the DSB for each PF.
5626 * Each PF has five segments: (the order inside each
5627 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5628 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5629 * 144-147 attn prods;
5630 */
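 /* Worked example for the BC layout above, assuming
  * IGU_BC_NDSB_NUM_SEGS is 2: PF0 owns producer lines 0-31, so with
  * igu_base_sb = 0 its SB #5 lands at prod_offset = (0 + 5) * 2 = 10,
  * i.e. line 10 (U producer) and line 11 (C producer).
  */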
5631 /* non-default-status-blocks */
5632 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5633 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5634 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5635 prod_offset = (bp->igu_base_sb + sb_idx) *
5636 num_segs;
5637
5638 for (i = 0; i < num_segs; i++) {
5639 addr = IGU_REG_PROD_CONS_MEMORY +
5640 (prod_offset + i) * 4;
5641 REG_WR(bp, addr, 0);
5642 }
5643 /* send consumer update with value 0 */
5644 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5645 USTORM_ID, 0, IGU_INT_NOP, 1);
5646 bnx2x_igu_clear_sb(bp,
5647 bp->igu_base_sb + sb_idx);
5648 }
5649
5650 /* default-status-blocks */
5651 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5652 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5653
5654 if (CHIP_MODE_IS_4_PORT(bp))
5655 dsb_idx = BP_FUNC(bp);
5656 else
5657 dsb_idx = BP_E1HVN(bp);
5658
5659 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5660 IGU_BC_BASE_DSB_PROD + dsb_idx :
5661 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5662
5663 for (i = 0; i < (num_segs * E1HVN_MAX);
5664 i += E1HVN_MAX) {
5665 addr = IGU_REG_PROD_CONS_MEMORY +
5666 (prod_offset + i)*4;
5667 REG_WR(bp, addr, 0);
5668 }
5669 /* send consumer update with 0 */
5670 if (CHIP_INT_MODE_IS_BC(bp)) {
5671 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5672 USTORM_ID, 0, IGU_INT_NOP, 1);
5673 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5674 CSTORM_ID, 0, IGU_INT_NOP, 1);
5675 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5676 XSTORM_ID, 0, IGU_INT_NOP, 1);
5677 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5678 TSTORM_ID, 0, IGU_INT_NOP, 1);
5679 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5680 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5681 } else {
5682 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5683 USTORM_ID, 0, IGU_INT_NOP, 1);
5684 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5685 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5686 }
5687 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5688
5689 /* !!! these should become driver const once
5690 rf-tool supports split-68 const */
5691 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5692 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5693 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5694 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5695 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5696 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5697 }
34f80b04 5698 }
34f80b04 5699
c14423fe 5700 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5701 REG_WR(bp, 0x2114, 0xffffffff);
5702 REG_WR(bp, 0x2120, 0xffffffff);
523224a3
DK
5703
5704 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5705 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5706 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5707 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5708 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5709 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5710
f4a66897
VZ
5711 if (CHIP_IS_E1x(bp)) {
5712 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5713 main_mem_base = HC_REG_MAIN_MEMORY +
5714 BP_PORT(bp) * (main_mem_size * 4);
5715 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5716 main_mem_width = 8;
5717
5718 val = REG_RD(bp, main_mem_prty_clr);
5719 if (val)
5720 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5721 "block during "
5722 "function init (0x%x)!\n", val);
5723
5724 /* Clear "false" parity errors in MSI-X table */
5725 for (i = main_mem_base;
5726 i < main_mem_base + main_mem_size * 4;
5727 i += main_mem_width) {
5728 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5729 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5730 i, main_mem_width / 4);
5731 }
5732 /* Clear HC parity attention */
5733 REG_RD(bp, main_mem_prty_clr);
5734 }
5735
b7737c9b 5736 bnx2x_phy_probe(&bp->link_params);
f85582f8 5737
34f80b04
EG
5738 return 0;
5739}
5740
9f6c9258 5741int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
34f80b04 5742{
523224a3 5743 int rc = 0;
a2fbb9ea 5744
34f80b04 5745 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
f2e0899f 5746 BP_ABS_FUNC(bp), load_code);
a2fbb9ea 5747
34f80b04
EG
5748 bp->dmae_ready = 0;
5749 mutex_init(&bp->dmae_mutex);
54016b26
EG
5750 rc = bnx2x_gunzip_init(bp);
5751 if (rc)
5752 return rc;
a2fbb9ea 5753
34f80b04
EG
5754 switch (load_code) {
5755 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5756 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
523224a3 5757 rc = bnx2x_init_hw_common(bp, load_code);
34f80b04
EG
5758 if (rc)
5759 goto init_hw_err;
5760 /* no break */
5761
5762 case FW_MSG_CODE_DRV_LOAD_PORT:
523224a3 5763 rc = bnx2x_init_hw_port(bp);
34f80b04
EG
5764 if (rc)
5765 goto init_hw_err;
5766 /* no break */
5767
5768 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3 5769 rc = bnx2x_init_hw_func(bp);
34f80b04
EG
5770 if (rc)
5771 goto init_hw_err;
5772 break;
5773
5774 default:
5775 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5776 break;
5777 }
5778
5779 if (!BP_NOMCP(bp)) {
f2e0899f 5780 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5781
5782 bp->fw_drv_pulse_wr_seq =
f2e0899f 5783 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
a2fbb9ea 5784 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
5785 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5786 }
a2fbb9ea 5787
34f80b04
EG
5788init_hw_err:
5789 bnx2x_gunzip_end(bp);
5790
5791 return rc;
a2fbb9ea
ET
5792}
5793
9f6c9258 5794void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea
ET
5795{
5796
5797#define BNX2X_PCI_FREE(x, y, size) \
5798 do { \
5799 if (x) { \
523224a3 5800 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
a2fbb9ea
ET
5801 x = NULL; \
5802 y = 0; \
5803 } \
5804 } while (0)
5805
5806#define BNX2X_FREE(x) \
5807 do { \
5808 if (x) { \
523224a3 5809 kfree((void *)x); \
a2fbb9ea
ET
5810 x = NULL; \
5811 } \
5812 } while (0)
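/* Both helpers expand inside do { } while (0) so that each behaves as a
 * single statement; e.g. the following stays well-formed despite the
 * trailing semicolon:
 *
 *	if (fp)
 *		BNX2X_FREE(fp->rx_buf_ring);
 *	else
 *		BNX2X_ERR("no fastpath\n");
 */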
5813
5814 int i;
5815
5816 /* fastpath */
555f6c78 5817 /* Common */
a2fbb9ea 5818 for_each_queue(bp, i) {
555f6c78 5819 /* status blocks */
f2e0899f
DK
5820 if (CHIP_IS_E2(bp))
5821 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5822 bnx2x_fp(bp, i, status_blk_mapping),
5823 sizeof(struct host_hc_status_block_e2));
5824 else
5825 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5826 bnx2x_fp(bp, i, status_blk_mapping),
5827 sizeof(struct host_hc_status_block_e1x));
555f6c78
EG
5828 }
5829 /* Rx */
54b9ddaa 5830 for_each_queue(bp, i) {
a2fbb9ea 5831
555f6c78 5832 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
5833 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5834 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5835 bnx2x_fp(bp, i, rx_desc_mapping),
5836 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5837
5838 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5839 bnx2x_fp(bp, i, rx_comp_mapping),
5840 sizeof(struct eth_fast_path_rx_cqe) *
5841 NUM_RCQ_BD);
a2fbb9ea 5842
7a9b2557 5843 /* SGE ring */
32626230 5844 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5845 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5846 bnx2x_fp(bp, i, rx_sge_mapping),
5847 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5848 }
555f6c78 5849 /* Tx */
54b9ddaa 5850 for_each_queue(bp, i) {
555f6c78
EG
5851
5852 /* fastpath tx rings: tx_buf tx_desc */
5853 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5854 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5855 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 5856 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 5857 }
a2fbb9ea
ET
5858 /* end of fastpath */
5859
5860 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 5861 sizeof(struct host_sp_status_block));
a2fbb9ea
ET
5862
5863 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5864 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5865
523224a3
DK
5866 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5867 bp->context.size);
5868
5869 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5870
5871 BNX2X_FREE(bp->ilt->lines);
f85582f8 5872
37b091ba 5873#ifdef BCM_CNIC
f2e0899f
DK
5874 if (CHIP_IS_E2(bp))
5875 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5876 sizeof(struct host_hc_status_block_e2));
5877 else
5878 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5879 sizeof(struct host_hc_status_block_e1x));
f85582f8 5880
523224a3 5881 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
a2fbb9ea 5882#endif
f85582f8 5883
7a9b2557 5884 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 5885
523224a3
DK
5886 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5887 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5888
a2fbb9ea
ET
5889#undef BNX2X_PCI_FREE
5890#undef BNX2X_FREE
5891}
5892
f2e0899f
DK
5893static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5894{
5895 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5896 if (CHIP_IS_E2(bp)) {
5897 bnx2x_fp(bp, index, sb_index_values) =
5898 (__le16 *)status_blk.e2_sb->sb.index_values;
5899 bnx2x_fp(bp, index, sb_running_index) =
5900 (__le16 *)status_blk.e2_sb->sb.running_index;
5901 } else {
5902 bnx2x_fp(bp, index, sb_index_values) =
5903 (__le16 *)status_blk.e1x_sb->sb.index_values;
5904 bnx2x_fp(bp, index, sb_running_index) =
5905 (__le16 *)status_blk.e1x_sb->sb.running_index;
5906 }
5907}
5908
9f6c9258 5909int bnx2x_alloc_mem(struct bnx2x *bp)
a2fbb9ea 5910{
a2fbb9ea
ET
5911#define BNX2X_PCI_ALLOC(x, y, size) \
5912 do { \
1a983142 5913 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
9f6c9258
DK
5914 if (x == NULL) \
5915 goto alloc_mem_err; \
5916 memset(x, 0, size); \
5917 } while (0)
a2fbb9ea 5918
9f6c9258
DK
5919#define BNX2X_ALLOC(x, size) \
5920 do { \
523224a3 5921 x = kzalloc(size, GFP_KERNEL); \
9f6c9258
DK
5922 if (x == NULL) \
5923 goto alloc_mem_err; \
9f6c9258 5924 } while (0)
a2fbb9ea 5925
9f6c9258 5926 int i;
a2fbb9ea 5927
9f6c9258
DK
5928 /* fastpath */
5929 /* Common */
a2fbb9ea 5930 for_each_queue(bp, i) {
f2e0899f 5931 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
9f6c9258 5932 bnx2x_fp(bp, i, bp) = bp;
9f6c9258 5933 /* status blocks */
f2e0899f
DK
5934 if (CHIP_IS_E2(bp))
5935 BNX2X_PCI_ALLOC(sb->e2_sb,
5936 &bnx2x_fp(bp, i, status_blk_mapping),
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_ALLOC(sb->e1x_sb,
9f6c9258 5940 &bnx2x_fp(bp, i, status_blk_mapping),
523224a3
DK
5941 sizeof(struct host_hc_status_block_e1x));
5942
f2e0899f 5943 set_sb_shortcuts(bp, i);
a2fbb9ea 5944 }
9f6c9258
DK
5945 /* Rx */
5946 for_each_queue(bp, i) {
a2fbb9ea 5947
9f6c9258
DK
5948 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5949 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5950 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5951 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5952 &bnx2x_fp(bp, i, rx_desc_mapping),
5953 sizeof(struct eth_rx_bd) * NUM_RX_BD);
555f6c78 5954
9f6c9258
DK
5955 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5956 &bnx2x_fp(bp, i, rx_comp_mapping),
5957 sizeof(struct eth_fast_path_rx_cqe) *
5958 NUM_RCQ_BD);
a2fbb9ea 5959
9f6c9258
DK
5960 /* SGE ring */
5961 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5962 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5963 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5964 &bnx2x_fp(bp, i, rx_sge_mapping),
5965 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5966 }
5967 /* Tx */
5968 for_each_queue(bp, i) {
8badd27a 5969
9f6c9258
DK
5970 /* fastpath tx rings: tx_buf tx_desc */
5971 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5972 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5973 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5974 &bnx2x_fp(bp, i, tx_desc_mapping),
5975 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
8badd27a 5976 }
9f6c9258 5977 /* end of fastpath */
8badd27a 5978
523224a3 5979#ifdef BCM_CNIC
f2e0899f
DK
5980 if (CHIP_IS_E2(bp))
5981 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5982 sizeof(struct host_hc_status_block_e2));
5983 else
5984 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5985 sizeof(struct host_hc_status_block_e1x));
8badd27a 5986
523224a3
DK
5987 /* allocate searcher T2 table */
5988 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5989#endif
a2fbb9ea 5990
8badd27a 5991
523224a3
DK
5992 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5993 sizeof(struct host_sp_status_block));
a2fbb9ea 5994
523224a3
DK
5995 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5996 sizeof(struct bnx2x_slowpath));
a2fbb9ea 5997
523224a3 5998 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
f85582f8 5999
523224a3
DK
6000 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6001 bp->context.size);
65abd74d 6002
523224a3 6003 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 6004
523224a3
DK
6005 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6006 goto alloc_mem_err;
65abd74d 6007
9f6c9258
DK
6008 /* Slow path ring */
6009 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 6010
523224a3
DK
6011 /* EQ */
6012 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6013 BCM_PAGE_SIZE * NUM_EQ_PAGES);
9f6c9258 6014 return 0;
e1510706 6015
9f6c9258
DK
6016alloc_mem_err:
6017 bnx2x_free_mem(bp);
6018 return -ENOMEM;
e1510706 6019
9f6c9258
DK
6020#undef BNX2X_PCI_ALLOC
6021#undef BNX2X_ALLOC
65abd74d
YG
6022}
6023
a2fbb9ea
ET
6024/*
6025 * Init service functions
6026 */
8d96286a 6027static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6028 int *state_p, int flags);
6029
523224a3 6030int bnx2x_func_start(struct bnx2x *bp)
a2fbb9ea 6031{
523224a3 6032 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
a2fbb9ea 6033
523224a3
DK
6034 /* Wait for completion */
6035 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6036 WAIT_RAMROD_COMMON);
6037}
a2fbb9ea 6038
8d96286a 6039static int bnx2x_func_stop(struct bnx2x *bp)
523224a3
DK
6040{
6041 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
a2fbb9ea 6042
523224a3
DK
6043 /* Wait for completion */
6044 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6045 0, &(bp->state), WAIT_RAMROD_COMMON);
a2fbb9ea
ET
6046}
6047
e665bfda 6048/**
f85582f8 6049 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
e665bfda
MC
6050 *
6051 * @param bp driver descriptor
6052 * @param set set or clear an entry (1 or 0)
6053 * @param mac pointer to a buffer containing a MAC
6054 * @param cl_bit_vec bit vector of clients to register a MAC for
6055 * @param cam_offset offset in a CAM to use
523224a3 6056 * @param is_bcast is the set MAC a broadcast address (for E1 only)
e665bfda 6057 */
523224a3 6058static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
f85582f8
DK
6059 u32 cl_bit_vec, u8 cam_offset,
6060 u8 is_bcast)
34f80b04 6061{
523224a3
DK
6062 struct mac_configuration_cmd *config =
6063 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6064 int ramrod_flags = WAIT_RAMROD_COMMON;
6065
6066 bp->set_mac_pending = 1;
6067 smp_wmb();
6068
8d9c5f34 6069 config->hdr.length = 1;
e665bfda
MC
6070 config->hdr.offset = cam_offset;
6071 config->hdr.client_id = 0xff;
34f80b04
EG
6072 config->hdr.reserved1 = 0;
6073
6074 /* primary MAC */
6075 config->config_table[0].msb_mac_addr =
e665bfda 6076 swab16(*(u16 *)&mac[0]);
34f80b04 6077 config->config_table[0].middle_mac_addr =
e665bfda 6078 swab16(*(u16 *)&mac[2]);
34f80b04 6079 config->config_table[0].lsb_mac_addr =
e665bfda 6080 swab16(*(u16 *)&mac[4]);
ca00392c 6081 config->config_table[0].clients_bit_vector =
e665bfda 6082 cpu_to_le32(cl_bit_vec);
34f80b04 6083 config->config_table[0].vlan_id = 0;
523224a3 6084 config->config_table[0].pf_id = BP_FUNC(bp);
3101c2bc 6085 if (set)
523224a3
DK
6086 SET_FLAG(config->config_table[0].flags,
6087 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6088 T_ETH_MAC_COMMAND_SET);
3101c2bc 6089 else
523224a3
DK
6090 SET_FLAG(config->config_table[0].flags,
6091 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6092 T_ETH_MAC_COMMAND_INVALIDATE);
34f80b04 6093
523224a3
DK
6094 if (is_bcast)
6095 SET_FLAG(config->config_table[0].flags,
6096 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6097
6098 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
3101c2bc 6099 (set ? "setting" : "clearing"),
34f80b04
EG
6100 config->config_table[0].msb_mac_addr,
6101 config->config_table[0].middle_mac_addr,
523224a3 6102 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
34f80b04 6103
523224a3 6104 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
34f80b04 6105 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
523224a3
DK
6106 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6107
6108 /* Wait for a completion */
6109 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
34f80b04
EG
6110}
6111
8d96286a 6112static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6113 int *state_p, int flags)
a2fbb9ea
ET
6114{
6115 /* can take a while if any port is running */
8b3a0f0b 6116 int cnt = 5000;
523224a3
DK
6117 u8 poll = flags & WAIT_RAMROD_POLL;
6118 u8 common = flags & WAIT_RAMROD_COMMON;
a2fbb9ea 6119
c14423fe
ET
6120 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6121 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6122
6123 might_sleep();
34f80b04 6124 while (cnt--) {
a2fbb9ea 6125 if (poll) {
523224a3
DK
6126 if (common)
6127 bnx2x_eq_int(bp);
6128 else {
6129 bnx2x_rx_int(bp->fp, 10);
6130 /* if index is different from 0
6131 * the reply for some commands will
6132 * be on the non default queue
6133 */
6134 if (idx)
6135 bnx2x_rx_int(&bp->fp[idx], 10);
6136 }
a2fbb9ea 6137 }
a2fbb9ea 6138
3101c2bc 6139 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6140 if (*state_p == state) {
6141#ifdef BNX2X_STOP_ON_ERROR
6142 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6143#endif
a2fbb9ea 6144 return 0;
8b3a0f0b 6145 }
a2fbb9ea 6146
a2fbb9ea 6147 msleep(1);
e3553b29
EG
6148
6149 if (bp->panic)
6150 return -EIO;
a2fbb9ea
ET
6151 }
6152
a2fbb9ea 6153 /* timeout! */
49d66772
ET
6154 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6155 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6156#ifdef BNX2X_STOP_ON_ERROR
6157 bnx2x_panic();
6158#endif
a2fbb9ea 6159
49d66772 6160 return -EBUSY;
a2fbb9ea
ET
6161}
6162
8d96286a 6163static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
e665bfda 6164{
f2e0899f
DK
6165 if (CHIP_IS_E1H(bp))
6166 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6167 else if (CHIP_MODE_IS_4_PORT(bp))
6168 return BP_FUNC(bp) * 32 + rel_offset;
6169 else
6170 return BP_VN(bp) * 32 + rel_offset;
523224a3
DK
6171}
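/* e.g. on E1H, where E1H_FUNC_MAX is 8: function 2 asking for relative
 * line 1 gets CAM offset 8 * 1 + 2 = 10, so the per-function entries end
 * up interleaved rather than contiguous.
 */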
6172
6173void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6174{
6175 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6176 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
e665bfda 6177
523224a3
DK
6178 /* networking MAC */
6179 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6180 (1 << bp->fp->cl_id), cam_offset , 0);
e665bfda 6181
523224a3
DK
6182 if (CHIP_IS_E1(bp)) {
6183 /* broadcast MAC */
6184 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6185 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6186 }
e665bfda 6187}
523224a3
DK
6188static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6189{
6190 int i = 0, old;
6191 struct net_device *dev = bp->dev;
6192 struct netdev_hw_addr *ha;
6193 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6194 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6195
6196 netdev_for_each_mc_addr(ha, dev) {
6197 /* copy mac */
6198 config_cmd->config_table[i].msb_mac_addr =
6199 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6200 config_cmd->config_table[i].middle_mac_addr =
6201 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6202 config_cmd->config_table[i].lsb_mac_addr =
6203 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
e665bfda 6204
523224a3
DK
6205 config_cmd->config_table[i].vlan_id = 0;
6206 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6207 config_cmd->config_table[i].clients_bit_vector =
6208 cpu_to_le32(1 << BP_L_ID(bp));
6209
6210 SET_FLAG(config_cmd->config_table[i].flags,
6211 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6212 T_ETH_MAC_COMMAND_SET);
6213
6214 DP(NETIF_MSG_IFUP,
6215 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6216 config_cmd->config_table[i].msb_mac_addr,
6217 config_cmd->config_table[i].middle_mac_addr,
6218 config_cmd->config_table[i].lsb_mac_addr);
6219 i++;
6220 }
6221 old = config_cmd->hdr.length;
6222 if (old > i) {
6223 for (; i < old; i++) {
6224 if (CAM_IS_INVALID(config_cmd->
6225 config_table[i])) {
6226 /* already invalidated */
6227 break;
6228 }
6229 /* invalidate */
6230 SET_FLAG(config_cmd->config_table[i].flags,
6231 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6232 T_ETH_MAC_COMMAND_INVALIDATE);
6233 }
6234 }
6235
6236 config_cmd->hdr.length = i;
6237 config_cmd->hdr.offset = offset;
6238 config_cmd->hdr.client_id = 0xff;
6239 config_cmd->hdr.reserved1 = 0;
6240
6241 bp->set_mac_pending = 1;
6242 smp_wmb();
6243
6244 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6245 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6246}
6247static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
e665bfda 6248{
523224a3
DK
6249 int i;
6250 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6251 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6252 int ramrod_flags = WAIT_RAMROD_COMMON;
6253
6254 bp->set_mac_pending = 1;
e665bfda
MC
6255 smp_wmb();
6256
523224a3
DK
6257 for (i = 0; i < config_cmd->hdr.length; i++)
6258 SET_FLAG(config_cmd->config_table[i].flags,
6259 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6260 T_ETH_MAC_COMMAND_INVALIDATE);
6261
6262 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6263 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
e665bfda
MC
6264
6265 /* Wait for a completion */
523224a3
DK
6266 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6267 ramrod_flags);
6268
e665bfda
MC
6269}
6270
993ac7b5
MC
6271#ifdef BCM_CNIC
6272/**
6273 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6274 * MAC(s). This function will wait until the ramrod completion
6275 * returns.
6276 *
6277 * @param bp driver handle
6278 * @param set set or clear the CAM entry
6279 *
6280 * @return 0 on success, -ENODEV if the ramrod doesn't return.
6281 */
8d96286a 6282static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
993ac7b5 6283{
523224a3
DK
6284 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6285 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6286 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6287 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
993ac7b5
MC
6288
6289 /* Send a SET_MAC ramrod */
523224a3
DK
6290 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6291 cam_offset, 0);
993ac7b5
MC
6292 return 0;
6293}
6294#endif
6295
523224a3
DK
6296static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6297 struct bnx2x_client_init_params *params,
6298 u8 activate,
6299 struct client_init_ramrod_data *data)
6300{
6301 /* Clear the buffer */
6302 memset(data, 0, sizeof(*data));
6303
6304 /* general */
6305 data->general.client_id = params->rxq_params.cl_id;
6306 data->general.statistics_counter_id = params->rxq_params.stat_id;
6307 data->general.statistics_en_flg =
6308 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6309 data->general.activate_flg = activate;
6310 data->general.sp_client_id = params->rxq_params.spcl_id;
6311
6312 /* Rx data */
6313 data->rx.tpa_en_flg =
6314 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6315 data->rx.vmqueue_mode_en_flg = 0;
6316 data->rx.cache_line_alignment_log_size =
6317 params->rxq_params.cache_line_log;
6318 data->rx.enable_dynamic_hc =
6319 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6320 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6321 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6322 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6323
6324 /* We don't set drop flags */
6325 data->rx.drop_ip_cs_err_flg = 0;
6326 data->rx.drop_tcp_cs_err_flg = 0;
6327 data->rx.drop_ttl0_flg = 0;
6328 data->rx.drop_udp_cs_err_flg = 0;
6329
6330 data->rx.inner_vlan_removal_enable_flg =
6331 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6332 data->rx.outer_vlan_removal_enable_flg =
6333 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6334 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6335 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6336 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6337 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6338 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6339 data->rx.bd_page_base.lo =
6340 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6341 data->rx.bd_page_base.hi =
6342 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6343 data->rx.sge_page_base.lo =
6344 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6345 data->rx.sge_page_base.hi =
6346 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6347 data->rx.cqe_page_base.lo =
6348 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6349 data->rx.cqe_page_base.hi =
6350 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6351 data->rx.is_leading_rss =
6352 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6353 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6354
6355 /* Tx data */
6356 data->tx.enforce_security_flg = 0; /* VF specific */
6357 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6358 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6359 data->tx.mtu = 0; /* VF specific */
6360 data->tx.tx_bd_page_base.lo =
6361 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6362 data->tx.tx_bd_page_base.hi =
6363 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6364
6365 /* flow control data */
6366 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6367 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6368 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6369 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6370 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6371 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6372 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6373
6374 data->fc.safc_group_num = params->txq_params.cos;
6375 data->fc.safc_group_en_flg =
6376 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6377 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6378}
6379
6380static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6381{
6382 /* ustorm cxt validation */
6383 cxt->ustorm_ag_context.cdu_usage =
6384 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6385 ETH_CONNECTION_TYPE);
6386 /* xcontext validation */
6387 cxt->xstorm_ag_context.cdu_reserved =
6388 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6389 ETH_CONNECTION_TYPE);
6390}
6391
8d96286a 6392static int bnx2x_setup_fw_client(struct bnx2x *bp,
6393 struct bnx2x_client_init_params *params,
6394 u8 activate,
6395 struct client_init_ramrod_data *data,
6396 dma_addr_t data_mapping)
523224a3
DK
6397{
6398 u16 hc_usec;
6399 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6400 int ramrod_flags = 0, rc;
6401
6402 /* HC and context validation values */
6403 hc_usec = params->txq_params.hc_rate ?
6404 1000000 / params->txq_params.hc_rate : 0;
6405 bnx2x_update_coalesce_sb_index(bp,
6406 params->txq_params.fw_sb_id,
6407 params->txq_params.sb_cq_index,
6408 !(params->txq_params.flags & QUEUE_FLG_HC),
6409 hc_usec);
6410
6411 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6412
6413 hc_usec = params->rxq_params.hc_rate ?
6414 1000000 / params->rxq_params.hc_rate : 0;
6415 bnx2x_update_coalesce_sb_index(bp,
6416 params->rxq_params.fw_sb_id,
6417 params->rxq_params.sb_cq_index,
6418 !(params->rxq_params.flags & QUEUE_FLG_HC),
6419 hc_usec);
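 /* hc_rate is a rate in interrupts/sec, so e.g. a requested rate of
  * 5000 yields 1000000 / 5000 = 200 usec between status-block updates;
  * a rate of 0 simply produces hc_usec = 0, while enabling/disabling
  * coalescing itself is driven by the QUEUE_FLG_HC checks above.
  */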
6420
6421 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6422 params->rxq_params.cid);
6423
6424 /* zero stats */
6425 if (params->txq_params.flags & QUEUE_FLG_STATS)
6426 storm_memset_xstats_zero(bp, BP_PORT(bp),
6427 params->txq_params.stat_id);
6428
6429 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6430 storm_memset_ustats_zero(bp, BP_PORT(bp),
6431 params->rxq_params.stat_id);
6432 storm_memset_tstats_zero(bp, BP_PORT(bp),
6433 params->rxq_params.stat_id);
6434 }
6435
6436 /* Fill the ramrod data */
6437 bnx2x_fill_cl_init_data(bp, params, activate, data);
6438
6439 /* SETUP ramrod.
6440 *
6441 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6442 * barrier other than mmiowb() is needed to impose
6443 * proper ordering of memory operations.
6444 */
6445 mmiowb();
a2fbb9ea 6446
a2fbb9ea 6447
523224a3
DK
6448 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6449 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6450
34f80b04 6451 /* Wait for completion */
523224a3
DK
6452 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6453 params->ramrod_params.index,
6454 params->ramrod_params.pstate,
6455 ramrod_flags);
34f80b04 6456 return rc;
a2fbb9ea
ET
6457}
6458
d6214d7a
DK
6459/**
6460 * Configure interrupt mode according to current configuration.
6461 * In case of MSI-X it will also try to enable MSI-X.
6462 *
6463 * @param bp
6464 *
6465 * @return int
6466 */
6467static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 6468{
d6214d7a 6469 int rc = 0;
ca00392c 6470
d6214d7a
DK
6471 switch (bp->int_mode) {
6472 case INT_MODE_MSI:
6473 bnx2x_enable_msi(bp);
6474 /* falling through... */
6475 case INT_MODE_INTx:
54b9ddaa 6476 bp->num_queues = 1;
d6214d7a 6477 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 6478 break;
d6214d7a
DK
6479 default:
6480 /* Set number of queues according to bp->multi_mode value */
6481 bnx2x_set_num_queues(bp);
ca00392c 6482
d6214d7a
DK
6483 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6484 bp->num_queues);
ca00392c 6485
d6214d7a
DK
6486 /* if we can't use MSI-X we only need one fp,
6487 * so try to enable MSI-X with the requested number of fp's
6488 * and fall back to MSI or legacy INTx with one fp
6489 */
6490 rc = bnx2x_enable_msix(bp);
6491 if (rc) {
6492 /* failed to enable MSI-X */
6493 if (bp->multi_mode)
6494 DP(NETIF_MSG_IFUP,
6495 "Multi requested but failed to "
6496 "enable MSI-X (%d), "
6497 "set number of queues to %d\n",
6498 bp->num_queues,
6499 1);
6500 bp->num_queues = 1;
6501
6502 if (!(bp->flags & DISABLE_MSI_FLAG))
6503 bnx2x_enable_msi(bp);
6504 }
ca00392c 6505
9f6c9258
DK
6506 break;
6507 }
d6214d7a
DK
6508
6509 return rc;
a2fbb9ea
ET
6510}
6511
c2bff63f
DK
6512/* must be called prior to any HW initializations */
6513static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6514{
6515 return L2_ILT_LINES(bp);
6516}
6517
523224a3
DK
6518void bnx2x_ilt_set_info(struct bnx2x *bp)
6519{
6520 struct ilt_client_info *ilt_client;
6521 struct bnx2x_ilt *ilt = BP_ILT(bp);
6522 u16 line = 0;
6523
6524 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6525 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6526
6527 /* CDU */
6528 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6529 ilt_client->client_num = ILT_CLIENT_CDU;
6530 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6531 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6532 ilt_client->start = line;
6533 line += L2_ILT_LINES(bp);
6534#ifdef BCM_CNIC
6535 line += CNIC_ILT_LINES;
6536#endif
6537 ilt_client->end = line - 1;
6538
6539 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6540 "flags 0x%x, hw psz %d\n",
6541 ilt_client->start,
6542 ilt_client->end,
6543 ilt_client->page_size,
6544 ilt_client->flags,
6545 ilog2(ilt_client->page_size >> 12));
6546
6547 /* QM */
6548 if (QM_INIT(bp->qm_cid_count)) {
6549 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6550 ilt_client->client_num = ILT_CLIENT_QM;
6551 ilt_client->page_size = QM_ILT_PAGE_SZ;
6552 ilt_client->flags = 0;
6553 ilt_client->start = line;
6554
6555 /* 4 bytes for each cid */
6556 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6557 QM_ILT_PAGE_SZ);
6558
6559 ilt_client->end = line - 1;
6560
6561 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6562 "flags 0x%x, hw psz %d\n",
6563 ilt_client->start,
6564 ilt_client->end,
6565 ilt_client->page_size,
6566 ilt_client->flags,
6567 ilog2(ilt_client->page_size >> 12));
6568
6569 }
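 /* Worked example with hypothetical numbers: a qm_cid_count of 256
  * and QM_QUEUES_PER_FUNC of 16 need 256 * 16 * 4 = 16384 bytes,
  * i.e. DIV_ROUND_UP(16384, QM_ILT_PAGE_SZ) lines - 4 lines if the
  * ILT page is 4 KiB.
  */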
6570 /* SRC */
6571 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6572#ifdef BCM_CNIC
6573 ilt_client->client_num = ILT_CLIENT_SRC;
6574 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6575 ilt_client->flags = 0;
6576 ilt_client->start = line;
6577 line += SRC_ILT_LINES;
6578 ilt_client->end = line - 1;
6579
6580 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6581 "flags 0x%x, hw psz %d\n",
6582 ilt_client->start,
6583 ilt_client->end,
6584 ilt_client->page_size,
6585 ilt_client->flags,
6586 ilog2(ilt_client->page_size >> 12));
6587
6588#else
6589 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6590#endif
9f6c9258 6591
523224a3
DK
6592 /* TM */
6593 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6594#ifdef BCM_CNIC
6595 ilt_client->client_num = ILT_CLIENT_TM;
6596 ilt_client->page_size = TM_ILT_PAGE_SZ;
6597 ilt_client->flags = 0;
6598 ilt_client->start = line;
6599 line += TM_ILT_LINES;
6600 ilt_client->end = line - 1;
6601
6602 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6603 "flags 0x%x, hw psz %d\n",
6604 ilt_client->start,
6605 ilt_client->end,
6606 ilt_client->page_size,
6607 ilt_client->flags,
6608 ilog2(ilt_client->page_size >> 12));
9f6c9258 6609
523224a3
DK
6610#else
6611 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6612#endif
6613}
f85582f8 6614
523224a3
DK
6615int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6616 int is_leading)
a2fbb9ea 6617{
523224a3 6618 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6619 int rc;
6620
523224a3
DK
6621 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6622 IGU_INT_ENABLE, 0);
a2fbb9ea 6623
523224a3
DK
6624 params.ramrod_params.pstate = &fp->state;
6625 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6626 params.ramrod_params.index = fp->index;
6627 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6628
523224a3
DK
6629 if (is_leading)
6630 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6631
523224a3
DK
6632 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6633
6634 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6635
6636 rc = bnx2x_setup_fw_client(bp, &params, 1,
6637 bnx2x_sp(bp, client_init_data),
6638 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6639 return rc;
a2fbb9ea
ET
6640}
6641
8d96286a 6642static int bnx2x_stop_fw_client(struct bnx2x *bp,
6643 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6644{
34f80b04 6645 int rc;
a2fbb9ea 6646
523224a3 6647 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6648
523224a3
DK
6649 /* halt the connection */
6650 *p->pstate = BNX2X_FP_STATE_HALTING;
6651 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6652 p->cl_id, 0);
a2fbb9ea 6653
34f80b04 6654 /* Wait for completion */
523224a3
DK
6655 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6656 p->pstate, poll_flag);
34f80b04 6657 if (rc) /* timeout */
da5a662a 6658 return rc;
a2fbb9ea 6659
523224a3
DK
6660 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6661 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6662 p->cl_id, 0);
6663 /* Wait for completion */
6664 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6665 p->pstate, poll_flag);
6666 if (rc) /* timeout */
6667 return rc;
a2fbb9ea 6668
a2fbb9ea 6669
523224a3
DK
6670 /* delete cfc entry */
6671 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 6672
523224a3
DK
6673 /* Wait for completion */
6674 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6675 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 6676 return rc;
a2fbb9ea
ET
6677}
6678
523224a3
DK
6679static int bnx2x_stop_client(struct bnx2x *bp, int index)
6680{
6681 struct bnx2x_client_ramrod_params client_stop = {0};
6682 struct bnx2x_fastpath *fp = &bp->fp[index];
6683
6684 client_stop.index = index;
6685 client_stop.cid = fp->cid;
6686 client_stop.cl_id = fp->cl_id;
6687 client_stop.pstate = &(fp->state);
6688 client_stop.poll = 0;
6689
6690 return bnx2x_stop_fw_client(bp, &client_stop);
6691}
6692
6693
34f80b04
EG
6694static void bnx2x_reset_func(struct bnx2x *bp)
6695{
6696 int port = BP_PORT(bp);
6697 int func = BP_FUNC(bp);
f2e0899f 6698 int i;
523224a3 6699 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
6700 (CHIP_IS_E2(bp) ?
6701 offsetof(struct hc_status_block_data_e2, common) :
6702 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
6703 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6704 int pfid_offset = offsetof(struct pci_entity, pf_id);
6705
6706 /* Disable the function in the FW */
6707 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6708 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6709 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6710 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6711
6712 /* FP SBs */
6713 for_each_queue(bp, i) {
6714 struct bnx2x_fastpath *fp = &bp->fp[i];
6715 REG_WR8(bp,
6716 BAR_CSTRORM_INTMEM +
6717 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6718 + pfunc_offset_fp + pfid_offset,
6719 HC_FUNCTION_DISABLED);
6720 }
6721
6722 /* SP SB */
6723 REG_WR8(bp,
6724 BAR_CSTRORM_INTMEM +
6725 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6726 pfunc_offset_sp + pfid_offset,
6727 HC_FUNCTION_DISABLED);
6728
6729
6730 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6731 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6732 0);
34f80b04
EG
6733
6734 /* Configure IGU */
f2e0899f
DK
6735 if (bp->common.int_block == INT_BLOCK_HC) {
6736 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6737 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6738 } else {
6739 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6740 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6741 }
34f80b04 6742
37b091ba
MC
6743#ifdef BCM_CNIC
6744 /* Disable Timer scan */
6745 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6746 /*
6747 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6748 * complete
6749 */
6750 for (i = 0; i < 200; i++) {
6751 msleep(10);
6752 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6753 break;
6754 }
6755#endif
34f80b04 6756 /* Clear ILT */
f2e0899f
DK
6757 bnx2x_clear_func_ilt(bp, func);
6758
6759 /* Timers workaround for an E2 bug: if this is vnic-3,
6760 * we need to set the entire ILT range for these timers.
6761 */
6762 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6763 struct ilt_client_info ilt_cli;
6764 /* use dummy TM client */
6765 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6766 ilt_cli.start = 0;
6767 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6768 ilt_cli.client_num = ILT_CLIENT_TM;
6769
6770 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6771 }
6772
6773 /* this assumes that reset_port() is called before reset_func() */
6774 if (CHIP_IS_E2(bp))
6775 bnx2x_pf_disable(bp);
523224a3
DK
6776
6777 bp->dmae_ready = 0;
34f80b04
EG
6778}
6779
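/* Quiesce a port: mask NIG and AEU attentions, stop feeding the BRB
 * and then verify that the BRB has actually drained.
 */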
6780static void bnx2x_reset_port(struct bnx2x *bp)
6781{
6782 int port = BP_PORT(bp);
6783 u32 val;
6784
6785 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6786
6787 /* Do not rcv packets to BRB */
6788 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6789 /* Do not direct rcv packets that are not for MCP to the BRB */
6790 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6791 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6792
6793 /* Configure AEU */
6794 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6795
6796 msleep(100);
6797 /* Check for BRB port occupancy */
6798 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6799 if (val)
6800 DP(NETIF_MSG_IFDOWN,
33471629 6801 "BRB1 is not empty, %d blocks are occupied\n", val);
34f80b04
EG
6802
6803 /* TODO: Close Doorbell port? */
6804}
6805
34f80b04
EG
6806static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6807{
6808 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 6809 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
6810
6811 switch (reset_code) {
6812 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6813 bnx2x_reset_port(bp);
6814 bnx2x_reset_func(bp);
6815 bnx2x_reset_common(bp);
6816 break;
6817
6818 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6819 bnx2x_reset_port(bp);
6820 bnx2x_reset_func(bp);
6821 break;
6822
6823 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6824 bnx2x_reset_func(bp);
6825 break;
49d66772 6826
34f80b04
EG
6827 default:
6828 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6829 break;
6830 }
6831}
6832
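/* Bring the NIC down: drain the Tx queues, clear MAC/multicast
 * filters, pick a reset_code from the WoL configuration, stop all
 * clients and the function, then reset the chip and report
 * UNLOAD_DONE to the MCP.
 */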
9f6c9258 6833void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6834{
da5a662a 6835 int port = BP_PORT(bp);
a2fbb9ea 6836 u32 reset_code = 0;
da5a662a 6837 int i, cnt, rc;
a2fbb9ea 6838
555f6c78 6839 /* Wait until tx fastpath tasks complete */
54b9ddaa 6840 for_each_queue(bp, i) {
228241eb
ET
6841 struct bnx2x_fastpath *fp = &bp->fp[i];
6842
34f80b04 6843 cnt = 1000;
e8b5fc51 6844 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6845
34f80b04
EG
6846 if (!cnt) {
6847 BNX2X_ERR("timeout waiting for queue[%d]\n",
6848 i);
6849#ifdef BNX2X_STOP_ON_ERROR
6850 bnx2x_panic();
6851 return -EBUSY;
6852#else
6853 break;
6854#endif
6855 }
6856 cnt--;
da5a662a 6857 msleep(1);
34f80b04 6858 }
228241eb 6859 }
da5a662a
VZ
6860 /* Give HW time to discard old tx messages */
6861 msleep(1);
a2fbb9ea 6862
3101c2bc 6863 if (CHIP_IS_E1(bp)) {
523224a3
DK
6864 /* invalidate mc list,
6865 * wait and poll (interrupts are off)
6866 */
6867 bnx2x_invlidate_e1_mc_list(bp);
6868 bnx2x_set_eth_mac(bp, 0);
3101c2bc 6869
523224a3 6870 } else {
65abd74d
YG
6871 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6872
523224a3 6873 bnx2x_set_eth_mac(bp, 0);
3101c2bc
YG
6874
6875 for (i = 0; i < MC_HASH_SIZE; i++)
6876 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6877 }
523224a3 6878
993ac7b5
MC
6879#ifdef BCM_CNIC
6880 /* Clear iSCSI L2 MAC */
6881 mutex_lock(&bp->cnic_mutex);
6882 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6883 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6884 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6885 }
6886 mutex_unlock(&bp->cnic_mutex);
6887#endif
3101c2bc 6888
65abd74d
YG
6889 if (unload_mode == UNLOAD_NORMAL)
6890 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6891
7d0446c2 6892 else if (bp->flags & NO_WOL_FLAG)
65abd74d 6893 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 6894
7d0446c2 6895 else if (bp->wol) {
65abd74d
YG
6896 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6897 u8 *mac_addr = bp->dev->dev_addr;
6898 u32 val;
6899 /* The MAC address is written to entries 1-4 to
6900 preserve entry 0, which is used by the PMF */
6901 u8 entry = (BP_E1HVN(bp) + 1)*8;
6902
6903 val = (mac_addr[0] << 8) | mac_addr[1];
6904 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6905
6906 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6907 (mac_addr[4] << 8) | mac_addr[5];
6908 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
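 /* Worked example (illustrative MAC 00:10:18:ab:cd:ef): the first
 * write above holds 0x00000010 (bytes 0-1), the second
 * 0x18abcdef (bytes 2-5). */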
6909
6910 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6911
6912 } else
6913 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6914
34f80b04
EG
6915 /* Close multi and leading connections;
6916 completions for the ramrods are collected synchronously */
523224a3
DK
6917 for_each_queue(bp, i)
6918
6919 if (bnx2x_stop_client(bp, i))
6920#ifdef BNX2X_STOP_ON_ERROR
6921 return;
6922#else
228241eb 6923 goto unload_error;
523224a3 6924#endif
a2fbb9ea 6925
523224a3 6926 rc = bnx2x_func_stop(bp);
da5a662a 6927 if (rc) {
523224a3 6928 BNX2X_ERR("Function stop failed!\n");
da5a662a 6929#ifdef BNX2X_STOP_ON_ERROR
523224a3 6930 return;
da5a662a
VZ
6931#else
6932 goto unload_error;
34f80b04 6933#endif
228241eb 6934 }
523224a3 6935#ifndef BNX2X_STOP_ON_ERROR
228241eb 6936unload_error:
523224a3 6937#endif
34f80b04 6938 if (!BP_NOMCP(bp))
a22f0788 6939 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 6940 else {
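 /* Without an MCP, mimic its bookkeeping with the driver-global
 * load_count array: per the code below, index 0 counts all
 * functions on this path and index 1 + port the per-port users.
 */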
f2e0899f
DK
6941 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6942 "%d, %d, %d\n", BP_PATH(bp),
6943 load_count[BP_PATH(bp)][0],
6944 load_count[BP_PATH(bp)][1],
6945 load_count[BP_PATH(bp)][2]);
6946 load_count[BP_PATH(bp)][0]--;
6947 load_count[BP_PATH(bp)][1 + port]--;
6948 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6949 "%d, %d, %d\n", BP_PATH(bp),
6950 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6951 load_count[BP_PATH(bp)][2]);
6952 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 6953 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 6954 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
6955 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6956 else
6957 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6958 }
a2fbb9ea 6959
34f80b04
EG
6960 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6961 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6962 bnx2x__link_reset(bp);
a2fbb9ea 6963
523224a3
DK
6964 /* Disable HW interrupts, NAPI */
6965 bnx2x_netif_stop(bp, 1);
6966
6967 /* Release IRQs */
d6214d7a 6968 bnx2x_free_irq(bp);
523224a3 6969
a2fbb9ea 6970 /* Reset the chip */
228241eb 6971 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6972
6973 /* Report UNLOAD_DONE to MCP */
34f80b04 6974 if (!BP_NOMCP(bp))
a22f0788 6975 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 6976
72fd0718
VZ
6977}
6978
9f6c9258 6979void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
6980{
6981 u32 val;
6982
6983 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6984
6985 if (CHIP_IS_E1(bp)) {
6986 int port = BP_PORT(bp);
6987 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6988 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6989
6990 val = REG_RD(bp, addr);
6991 val &= ~(0x300);
6992 REG_WR(bp, addr, val);
6993 } else if (CHIP_IS_E1H(bp)) {
6994 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6995 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6996 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6997 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6998 }
6999}
7000
72fd0718
VZ
7001/* Close gates #2, #3 and #4: */
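/* The "gates" are traffic valves toward the chip: #2 discards host
 * internal writes, #4 discards doorbells and #3 masks the per-port HC
 * config bit (note its inverted polarity below); closing them isolates
 * the device before a process-kill reset. Mapping inferred from the
 * register names.
 */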
7002static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7003{
7004 u32 val, addr;
7005
7006 /* Gates #2 and #4a are closed/opened for "not E1" only */
7007 if (!CHIP_IS_E1(bp)) {
7008 /* #4 */
7009 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7010 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7011 close ? (val | 0x1) : (val & (~(u32)1)));
7012 /* #2 */
7013 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7014 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7015 close ? (val | 0x1) : (val & (~(u32)1)));
7016 }
7017
7018 /* #3 */
7019 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7020 val = REG_RD(bp, addr);
7021 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7022
7023 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7024 close ? "closing" : "opening");
7025 mmiowb();
7026}
7027
7028#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7029
7030static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7031{
7032 /* Do some magic... */
7033 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7034 *magic_val = val & SHARED_MF_CLP_MAGIC;
7035 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7036}
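/* Typical pairing, as in bnx2x_reset_mcp_prep()/bnx2x_reset_mcp_comp():
 *
 *	u32 magic;
 *	bnx2x_clp_reset_prep(bp, &magic);
 *	... reset the MCP ...
 *	bnx2x_clp_reset_done(bp, magic);
 */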
7037
7038/* Restore the value of the `magic' bit.
7039 *
7040 * @param bp Driver handle.
7041 * @param magic_val Old value of the `magic' bit.
7042 */
7043static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7044{
7045 /* Restore the `magic' bit value... */
72fd0718
VZ
7046 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7047 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7048 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7049}
7050
f85582f8
DK
7051/**
7052 * Prepares for MCP reset: takes care of CLP configurations.
72fd0718
VZ
7053 *
7054 * @param bp
7055 * @param magic_val Where the old value of the 'magic' bit is saved.
7056 */
7057static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7058{
7059 u32 shmem;
7060 u32 validity_offset;
7061
7062 DP(NETIF_MSG_HW, "Starting\n");
7063
7064 /* Set `magic' bit in order to save MF config */
7065 if (!CHIP_IS_E1(bp))
7066 bnx2x_clp_reset_prep(bp, magic_val);
7067
7068 /* Get shmem offset */
7069 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7070 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7071
7072 /* Clear validity map flags */
7073 if (shmem > 0)
7074 REG_WR(bp, shmem + validity_offset, 0);
7075}
7076
7077#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7078#define MCP_ONE_TIMEOUT 100 /* 100 ms */
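/* bnx2x_reset_mcp_comp() below polls MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50
 * times, i.e. up to 5s on ASIC and up to 50s on slow emulation/FPGA
 * parts where each wait is stretched 10x.
 */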
7079
7080/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7081 * depending on the HW type.
7082 *
7083 * @param bp
7084 */
7085static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7086{
7087 /* special handling for emulation and FPGA,
7088 wait 10 times longer */
7089 if (CHIP_REV_IS_SLOW(bp))
7090 msleep(MCP_ONE_TIMEOUT*10);
7091 else
7092 msleep(MCP_ONE_TIMEOUT);
7093}
7094
7095static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7096{
7097 u32 shmem, cnt, validity_offset, val;
7098 int rc = 0;
7099
7100 msleep(100);
7101
7102 /* Get shmem offset */
7103 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7104 if (shmem == 0) {
7105 BNX2X_ERR("Shmem 0 return failure\n");
7106 rc = -ENOTTY;
7107 goto exit_lbl;
7108 }
7109
7110 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7111
7112 /* Wait for MCP to come up */
7113 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7114 /* TBD: it's best to check the validity map of the last port;
7115 * currently this checks port 0.
7116 */
7117 val = REG_RD(bp, shmem + validity_offset);
7118 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7119 shmem + validity_offset, val);
7120
7121 /* check that shared memory is valid. */
7122 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7123 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7124 break;
7125
7126 bnx2x_mcp_wait_one(bp);
7127 }
7128
7129 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7130
7131 /* Check that shared memory is valid. This indicates that MCP is up. */
7132 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7133 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7134 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7135 rc = -ENOTTY;
7136 goto exit_lbl;
7137 }
7138
7139exit_lbl:
7140 /* Restore the `magic' bit value */
7141 if (!CHIP_IS_E1(bp))
7142 bnx2x_clp_reset_done(bp, magic_val);
7143
7144 return rc;
7145}
7146
7147static void bnx2x_pxp_prep(struct bnx2x *bp)
7148{
7149 if (!CHIP_IS_E1(bp)) {
7150 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7151 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7152 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7153 mmiowb();
7154 }
7155}
7156
7157/*
7158 * Reset the whole chip except for:
7159 * - PCIE core
7160 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7161 * one reset bit)
7162 * - IGU
7163 * - MISC (including AEU)
7164 * - GRC
7165 * - RBCN, RBCP
7166 */
7167static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7168{
7169 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7170
7171 not_reset_mask1 =
7172 MISC_REGISTERS_RESET_REG_1_RST_HC |
7173 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7174 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7175
7176 not_reset_mask2 =
7177 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7178 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7179 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7180 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7181 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7182 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7183 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7184 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7185
7186 reset_mask1 = 0xffffffff;
7187
7188 if (CHIP_IS_E1(bp))
7189 reset_mask2 = 0xffff;
7190 else
7191 reset_mask2 = 0x1ffff;
7192
7193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7194 reset_mask1 & (~not_reset_mask1));
7195 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7196 reset_mask2 & (~not_reset_mask2));
7197
7198 barrier();
7199 mmiowb();
7200
7201 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7202 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7203 mmiowb();
7204}
7205
7206static int bnx2x_process_kill(struct bnx2x *bp)
7207{
7208 int cnt = 1000;
7209 u32 val = 0;
7210 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7211
7212
7213 /* Empty the Tetris buffer, wait for 1s */
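 /* The idle criteria below (0x7e SR credits, 0xa0 block credits, both
 * port-idle bits set, exp-ROM register all-ones) are presumably the
 * PXP2 full-credit reset values, i.e. no read requests in flight. */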
7214 do {
7215 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7216 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7217 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7218 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7219 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7220 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7221 ((port_is_idle_0 & 0x1) == 0x1) &&
7222 ((port_is_idle_1 & 0x1) == 0x1) &&
7223 (pgl_exp_rom2 == 0xffffffff))
7224 break;
7225 msleep(1);
7226 } while (cnt-- > 0);
7227
7228 if (cnt <= 0) {
7229 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7230 " are still"
7231 " outstanding read requests after 1s!\n");
7232 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7233 " port_is_idle_0=0x%08x,"
7234 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7235 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7236 pgl_exp_rom2);
7237 return -EAGAIN;
7238 }
7239
7240 barrier();
7241
7242 /* Close gates #2, #3 and #4 */
7243 bnx2x_set_234_gates(bp, true);
7244
7245 /* TBD: Indicate that "process kill" is in progress to MCP */
7246
7247 /* Clear "unprepared" bit */
7248 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7249 barrier();
7250
7251 /* Make sure all is written to the chip before the reset */
7252 mmiowb();
7253
7254 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7255 * PSWHST, GRC and PSWRD Tetris buffer.
7256 */
7257 msleep(1);
7258
7259 /* Prepare to chip reset: */
7260 /* MCP */
7261 bnx2x_reset_mcp_prep(bp, &val);
7262
7263 /* PXP */
7264 bnx2x_pxp_prep(bp);
7265 barrier();
7266
7267 /* reset the chip */
7268 bnx2x_process_kill_chip_reset(bp);
7269 barrier();
7270
7271 /* Recover after reset: */
7272 /* MCP */
7273 if (bnx2x_reset_mcp_comp(bp, val))
7274 return -EAGAIN;
7275
7276 /* PXP */
7277 bnx2x_pxp_prep(bp);
7278
7279 /* Open the gates #2, #3 and #4 */
7280 bnx2x_set_234_gates(bp, false);
7281
7282 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
7283 * reset state, re-enable attentions. */
7284
a2fbb9ea
ET
7285 return 0;
7286}
7287
72fd0718
VZ
7288static int bnx2x_leader_reset(struct bnx2x *bp)
7289{
7290 int rc = 0;
7291 /* Try to recover after the failure */
7292 if (bnx2x_process_kill(bp)) {
7293 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7294 bp->dev->name);
7295 rc = -EAGAIN;
7296 goto exit_leader_reset;
7297 }
7298
7299 /* Clear "reset is in progress" bit and update the driver state */
7300 bnx2x_set_reset_done(bp);
7301 bp->recovery_state = BNX2X_RECOVERY_DONE;
7302
7303exit_leader_reset:
7304 bp->is_leader = 0;
7305 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7306 smp_wmb();
7307 return rc;
7308}
7309
72fd0718
VZ
7310/* Assumption: runs under rtnl lock. This together with the fact
7311 * that it's called only from bnx2x_reset_task() ensures that it
7312 * will never be called when netif_running(bp->dev) is false.
7313 */
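/* In brief: every function enters INIT, tries to grab the leader lock
 * and unloads itself; the leader then waits in WAIT until the global
 * load count drops to zero, runs the "process kill" reset and reloads,
 * while non-leaders wait for the leader to finish and then reload.
 */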
7314static void bnx2x_parity_recover(struct bnx2x *bp)
7315{
7316 DP(NETIF_MSG_HW, "Handling parity\n");
7317 while (1) {
7318 switch (bp->recovery_state) {
7319 case BNX2X_RECOVERY_INIT:
7320 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7321 /* Try to get a LEADER_LOCK HW lock */
7322 if (bnx2x_trylock_hw_lock(bp,
7323 HW_LOCK_RESOURCE_RESERVED_08))
7324 bp->is_leader = 1;
7325
7326 /* Stop the driver */
7327 /* If interface has been removed - break */
7328 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7329 return;
7330
7331 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7332 /* Ensure "is_leader" and "recovery_state"
7333 * update values are seen on other CPUs
7334 */
7335 smp_wmb();
7336 break;
7337
7338 case BNX2X_RECOVERY_WAIT:
7339 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7340 if (bp->is_leader) {
7341 u32 load_counter = bnx2x_get_load_cnt(bp);
7342 if (load_counter) {
7343 /* Wait until all other functions get
7344 * down.
7345 */
7346 schedule_delayed_work(&bp->reset_task,
7347 HZ/10);
7348 return;
7349 } else {
7350 /* If all other functions got down -
7351 * try to bring the chip back to
7352 * normal. In any case it's an exit
7353 * point for a leader.
7354 */
7355 if (bnx2x_leader_reset(bp) ||
7356 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7357 printk(KERN_ERR"%s: Recovery "
7358 "has failed. Power cycle is "
7359 "needed.\n", bp->dev->name);
7360 /* Disconnect this device */
7361 netif_device_detach(bp->dev);
7362 /* Block ifup for all function
7363 * of this ASIC until
7364 * "process kill" or power
7365 * cycle.
7366 */
7367 bnx2x_set_reset_in_progress(bp);
7368 /* Shut down the power */
7369 bnx2x_set_power_state(bp,
7370 PCI_D3hot);
7371 return;
7372 }
7373
7374 return;
7375 }
7376 } else { /* non-leader */
7377 if (!bnx2x_reset_is_done(bp)) {
7378 /* Try to get a LEADER_LOCK HW lock as
7379 * long as a former leader may have
7380 * been unloaded by the user or
7381 * released leadership for some other
7382 * reason.
7383 */
7384 if (bnx2x_trylock_hw_lock(bp,
7385 HW_LOCK_RESOURCE_RESERVED_08)) {
7386 /* I'm a leader now! Restart a
7387 * switch case.
7388 */
7389 bp->is_leader = 1;
7390 break;
7391 }
7392
7393 schedule_delayed_work(&bp->reset_task,
7394 HZ/10);
7395 return;
7396
7397 } else { /* A leader has completed
7398 * the "process kill". It's an exit
7399 * point for a non-leader.
7400 */
7401 bnx2x_nic_load(bp, LOAD_NORMAL);
7402 bp->recovery_state =
7403 BNX2X_RECOVERY_DONE;
7404 smp_wmb();
7405 return;
7406 }
7407 }
7408 default:
7409 return;
7410 }
7411 }
7412}
7413
7414/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7415 * scheduled on a general queue in order to prevent a dead lock.
7416 */
34f80b04
EG
7417static void bnx2x_reset_task(struct work_struct *work)
7418{
72fd0718 7419 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
7420
7421#ifdef BNX2X_STOP_ON_ERROR
7422 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7423 " so reset not done to allow debug dump,\n"
72fd0718 7424 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
7425 return;
7426#endif
7427
7428 rtnl_lock();
7429
7430 if (!netif_running(bp->dev))
7431 goto reset_task_exit;
7432
72fd0718
VZ
7433 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7434 bnx2x_parity_recover(bp);
7435 else {
7436 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7437 bnx2x_nic_load(bp, LOAD_NORMAL);
7438 }
34f80b04
EG
7439
7440reset_task_exit:
7441 rtnl_unlock();
7442}
7443
a2fbb9ea
ET
7444/* end of nic load/unload */
7445
a2fbb9ea
ET
7446/*
7447 * Init service functions
7448 */
7449
8d96286a 7450static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
7451{
7452 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7453 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7454 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
7455}
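/* The pretend registers are assumed to be laid out consecutively per
 * function, so e.g. absolute function 2 resolves to base + 2 * stride,
 * i.e. the F2 pretend register.
 */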
7456
f2e0899f 7457static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7458{
f2e0899f 7459 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
7460
7461 /* Flush all outstanding writes */
7462 mmiowb();
7463
7464 /* Pretend to be function 0 */
7465 REG_WR(bp, reg, 0);
f2e0899f 7466 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
7467
7468 /* From now we are in the "like-E1" mode */
7469 bnx2x_int_disable(bp);
7470
7471 /* Flush all outstanding writes */
7472 mmiowb();
7473
f2e0899f
DK
7474 /* Restore the original function */
7475 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7476 REG_RD(bp, reg);
f1ef27ef
EG
7477}
7478
f2e0899f 7479static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7480{
f2e0899f 7481 if (CHIP_IS_E1(bp))
f1ef27ef 7482 bnx2x_int_disable(bp);
f2e0899f
DK
7483 else
7484 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
7485}
7486
34f80b04
EG
7487static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7488{
7489 u32 val;
7490
7491 /* Check if there is any driver already loaded */
7492 val = REG_RD(bp, MISC_REG_UNPREPARED);
7493 if (val == 0x1) {
7494 /* Check if it is the UNDI driver;
7495 * the UNDI driver initializes the CID offset for the normal bell to 0x7
7496 */
4a37fb66 7497 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7498 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7499 if (val == 0x7) {
7500 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
7501 /* save our pf_num */
7502 int orig_pf_num = bp->pf_num;
da5a662a
VZ
7503 u32 swap_en;
7504 u32 swap_val;
34f80b04 7505
b4661739
EG
7506 /* clear the UNDI indication */
7507 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7508
34f80b04
EG
7509 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7510
7511 /* try unload UNDI on port 0 */
f2e0899f 7512 bp->pf_num = 0;
da5a662a 7513 bp->fw_seq =
f2e0899f 7514 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7515 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7516 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7517
7518 /* if UNDI is loaded on the other port */
7519 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7520
da5a662a 7521 /* send "DONE" for previous unload */
a22f0788
YR
7522 bnx2x_fw_command(bp,
7523 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7524
7525 /* unload UNDI on port 1 */
f2e0899f 7526 bp->pf_num = 1;
da5a662a 7527 bp->fw_seq =
f2e0899f 7528 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
7529 DRV_MSG_SEQ_NUMBER_MASK);
7530 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7531
a22f0788 7532 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7533 }
7534
b4661739
EG
7535 /* now it's safe to release the lock */
7536 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7537
f2e0899f 7538 bnx2x_undi_int_disable(bp);
da5a662a
VZ
7539
7540 /* close input traffic and wait for it */
7541 /* Do not rcv packets to BRB */
7542 REG_WR(bp,
7543 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7544 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7545 /* Do not direct rcv packets that are not for MCP to
7546 * the BRB */
7547 REG_WR(bp,
7548 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7549 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7550 /* clear AEU */
7551 REG_WR(bp,
7552 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7553 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7554 msleep(10);
7555
7556 /* save NIG port swap info */
7557 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7558 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7559 /* reset device */
7560 REG_WR(bp,
7561 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7562 0xd3ffffff);
34f80b04
EG
7563 REG_WR(bp,
7564 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7565 0x1403);
da5a662a
VZ
7566 /* take the NIG out of reset and restore swap values */
7567 REG_WR(bp,
7568 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7569 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7570 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7571 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7572
7573 /* send unload done to the MCP */
a22f0788 7574 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7575
7576 /* restore our func and fw_seq */
f2e0899f 7577 bp->pf_num = orig_pf_num;
da5a662a 7578 bp->fw_seq =
f2e0899f 7579 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7580 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7581 } else
7582 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7583 }
7584}
7585
7586static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7587{
7588 u32 val, val2, val3, val4, id;
72ce58c3 7589 u16 pmc;
34f80b04
EG
7590
7591 /* Get the chip revision id and number. */
7592 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7593 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7594 id = ((val & 0xffff) << 16);
7595 val = REG_RD(bp, MISC_REG_CHIP_REV);
7596 id |= ((val & 0xf) << 12);
7597 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7598 id |= ((val & 0xff) << 4);
5a40e08e 7599 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7600 id |= (val & 0xf);
7601 bp->common.chip_id = id;
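 /* Illustrative composition: chip num 0x164e, rev 0, metal 0 and
 * bond_id 0 yield chip_id 0x164e0000. */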
523224a3
DK
7602
7603 /* Set doorbell size */
7604 bp->db_size = (1 << BNX2X_DB_SHIFT);
7605
f2e0899f
DK
7606 if (CHIP_IS_E2(bp)) {
7607 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7608 if ((val & 1) == 0)
7609 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7610 else
7611 val = (val >> 1) & 1;
7612 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7613 "2_PORT_MODE");
7614 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7615 CHIP_2_PORT_MODE;
7616
7617 if (CHIP_MODE_IS_4_PORT(bp))
7618 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7619 else
7620 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7621 } else {
7622 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7623 bp->pfid = bp->pf_num; /* 0..7 */
7624 }
7625
523224a3
DK
7626 /*
7627 * set base FW non-default (fast path) status block id, this value is
7628 * used to initialize the fw_sb_id saved on the fp/queue structure to
7629 * determine the id used by the FW.
7630 */
f2e0899f
DK
7631 if (CHIP_IS_E1x(bp))
7632 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7633 else /* E2 */
7634 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7635
7636 bp->link_params.chip_id = bp->common.chip_id;
7637 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7638
1c06328c
EG
7639 val = (REG_RD(bp, 0x2874) & 0x55);
7640 if ((bp->common.chip_id & 0x1) ||
7641 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7642 bp->flags |= ONE_PORT_FLAG;
7643 BNX2X_DEV_INFO("single port device\n");
7644 }
7645
34f80b04
EG
7646 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7647 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7648 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7649 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7650 bp->common.flash_size, bp->common.flash_size);
7651
7652 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
f2e0899f
DK
7653 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7654 MISC_REG_GENERIC_CR_1 :
7655 MISC_REG_GENERIC_CR_0));
34f80b04 7656 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 7657 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
7658 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7659 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 7660
f2e0899f 7661 if (!bp->common.shmem_base) {
34f80b04
EG
7662 BNX2X_DEV_INFO("MCP not active\n");
7663 bp->flags |= NO_MCP_FLAG;
7664 return;
7665 }
7666
7667 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7668 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7669 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f2e0899f 7670 BNX2X_ERR("BAD MCP validity signature\n");
34f80b04
EG
7671
7672 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7673 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7674
7675 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7676 SHARED_HW_CFG_LED_MODE_MASK) >>
7677 SHARED_HW_CFG_LED_MODE_SHIFT);
7678
c2c8b03e
EG
7679 bp->link_params.feature_config_flags = 0;
7680 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7681 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7682 bp->link_params.feature_config_flags |=
7683 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7684 else
7685 bp->link_params.feature_config_flags &=
7686 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7687
34f80b04
EG
7688 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7689 bp->common.bc_ver = val;
7690 BNX2X_DEV_INFO("bc_ver %X\n", val);
7691 if (val < BNX2X_BC_VER) {
7692 /* for now only warn;
7693 * later we might need to enforce this */
f2e0899f
DK
7694 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7695 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 7696 }
4d295db0 7697 bp->link_params.feature_config_flags |=
a22f0788 7698 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
7699 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7700
a22f0788
YR
7701 bp->link_params.feature_config_flags |=
7702 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7703 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3
EG
7704
7705 if (BP_E1HVN(bp) == 0) {
7706 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7707 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7708 } else {
7709 /* no WOL capability for E1HVN != 0 */
7710 bp->flags |= NO_WOL_FLAG;
7711 }
7712 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7713 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
7714
7715 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7716 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7717 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7718 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7719
cdaa7cb8
VZ
7720 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7721 val, val2, val3, val4);
34f80b04
EG
7722}
7723
f2e0899f
DK
7724#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7725#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7726
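/* Each valid IGU CAM entry maps one status-block vector to a function:
 * IGU_FID() extracts the owning function id (with a PF/VF encoding bit)
 * and IGU_VEC() the vector number, vector 0 being the default SB.
 */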
7727static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7728{
7729 int pfid = BP_FUNC(bp);
7730 int vn = BP_E1HVN(bp);
7731 int igu_sb_id;
7732 u32 val;
7733 u8 fid;
7734
7735 bp->igu_base_sb = 0xff;
7736 bp->igu_sb_cnt = 0;
7737 if (CHIP_INT_MODE_IS_BC(bp)) {
7738 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7739 bp->l2_cid_count);
7740
7741 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7742 FP_SB_MAX_E1x;
7743
7744 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7745 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7746
7747 return;
7748 }
7749
7750 /* IGU in normal mode - read CAM */
7751 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7752 igu_sb_id++) {
7753 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7754 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7755 continue;
7756 fid = IGU_FID(val);
7757 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7758 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7759 continue;
7760 if (IGU_VEC(val) == 0)
7761 /* default status block */
7762 bp->igu_dsb_id = igu_sb_id;
7763 else {
7764 if (bp->igu_base_sb == 0xff)
7765 bp->igu_base_sb = igu_sb_id;
7766 bp->igu_sb_cnt++;
7767 }
7768 }
7769 }
7770 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7771 if (bp->igu_sb_cnt == 0)
7772 BNX2X_ERR("CAM configuration error\n");
7773}
7774
34f80b04
EG
7775static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7776 u32 switch_cfg)
a2fbb9ea 7777{
a22f0788
YR
7778 int cfg_size = 0, idx, port = BP_PORT(bp);
7779
7780 /* Aggregation of supported attributes of all external phys */
7781 bp->port.supported[0] = 0;
7782 bp->port.supported[1] = 0;
b7737c9b
YR
7783 switch (bp->link_params.num_phys) {
7784 case 1:
a22f0788
YR
7785 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7786 cfg_size = 1;
7787 break;
b7737c9b 7788 case 2:
a22f0788
YR
7789 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7790 cfg_size = 1;
7791 break;
7792 case 3:
7793 if (bp->link_params.multi_phy_config &
7794 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7795 bp->port.supported[1] =
7796 bp->link_params.phy[EXT_PHY1].supported;
7797 bp->port.supported[0] =
7798 bp->link_params.phy[EXT_PHY2].supported;
7799 } else {
7800 bp->port.supported[0] =
7801 bp->link_params.phy[EXT_PHY1].supported;
7802 bp->port.supported[1] =
7803 bp->link_params.phy[EXT_PHY2].supported;
7804 }
7805 cfg_size = 2;
7806 break;
b7737c9b 7807 }
a2fbb9ea 7808
a22f0788 7809 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 7810 BNX2X_ERR("NVRAM config error. BAD phy config. "
a22f0788 7811 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 7812 SHMEM_RD(bp,
a22f0788
YR
7813 dev_info.port_hw_config[port].external_phy_config),
7814 SHMEM_RD(bp,
7815 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 7816 return;
f85582f8 7817 }
a2fbb9ea 7818
b7737c9b
YR
7819 switch (switch_cfg) {
7820 case SWITCH_CFG_1G:
34f80b04
EG
7821 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7822 port*0x10);
7823 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7824 break;
7825
7826 case SWITCH_CFG_10G:
34f80b04
EG
7827 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7828 port*0x18);
7829 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7830 break;
7831
7832 default:
7833 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 7834 bp->port.link_config[0]);
a2fbb9ea
ET
7835 return;
7836 }
a22f0788
YR
7837 /* mask what we support according to speed_cap_mask per configuration */
7838 for (idx = 0; idx < cfg_size; idx++) {
7839 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7840 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 7841 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7842
a22f0788 7843 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7844 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 7845 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7846
a22f0788 7847 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7848 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 7849 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7850
a22f0788 7851 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7852 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 7853 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7854
a22f0788 7855 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7856 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 7857 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 7858 SUPPORTED_1000baseT_Full);
a2fbb9ea 7859
a22f0788 7860 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7861 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 7862 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7863
a22f0788 7864 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 7865 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
7866 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7867
7868 }
a2fbb9ea 7869
a22f0788
YR
7870 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7871 bp->port.supported[1]);
a2fbb9ea
ET
7872}
7873
34f80b04 7874static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7875{
a22f0788
YR
7876 u32 link_config, idx, cfg_size = 0;
7877 bp->port.advertising[0] = 0;
7878 bp->port.advertising[1] = 0;
7879 switch (bp->link_params.num_phys) {
7880 case 1:
7881 case 2:
7882 cfg_size = 1;
7883 break;
7884 case 3:
7885 cfg_size = 2;
7886 break;
7887 }
7888 for (idx = 0; idx < cfg_size; idx++) {
7889 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7890 link_config = bp->port.link_config[idx];
7891 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 7892 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
7893 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7894 bp->link_params.req_line_speed[idx] =
7895 SPEED_AUTO_NEG;
7896 bp->port.advertising[idx] |=
7897 bp->port.supported[idx];
f85582f8
DK
7898 } else {
7899 /* force 10G, no AN */
a22f0788
YR
7900 bp->link_params.req_line_speed[idx] =
7901 SPEED_10000;
7902 bp->port.advertising[idx] |=
7903 (ADVERTISED_10000baseT_Full |
f85582f8 7904 ADVERTISED_FIBRE);
a22f0788 7905 continue;
f85582f8
DK
7906 }
7907 break;
a2fbb9ea 7908
f85582f8 7909 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
7910 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7911 bp->link_params.req_line_speed[idx] =
7912 SPEED_10;
7913 bp->port.advertising[idx] |=
7914 (ADVERTISED_10baseT_Full |
f85582f8
DK
7915 ADVERTISED_TP);
7916 } else {
7917 BNX2X_ERROR("NVRAM config error. "
7918 "Invalid link_config 0x%x"
7919 " speed_cap_mask 0x%x\n",
7920 link_config,
a22f0788 7921 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7922 return;
7923 }
7924 break;
a2fbb9ea 7925
f85582f8 7926 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
7927 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7928 bp->link_params.req_line_speed[idx] =
7929 SPEED_10;
7930 bp->link_params.req_duplex[idx] =
7931 DUPLEX_HALF;
7932 bp->port.advertising[idx] |=
7933 (ADVERTISED_10baseT_Half |
f85582f8
DK
7934 ADVERTISED_TP);
7935 } else {
7936 BNX2X_ERROR("NVRAM config error. "
7937 "Invalid link_config 0x%x"
7938 " speed_cap_mask 0x%x\n",
7939 link_config,
7940 bp->link_params.speed_cap_mask[idx]);
7941 return;
7942 }
7943 break;
a2fbb9ea 7944
f85582f8
DK
7945 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7946 if (bp->port.supported[idx] &
7947 SUPPORTED_100baseT_Full) {
a22f0788
YR
7948 bp->link_params.req_line_speed[idx] =
7949 SPEED_100;
7950 bp->port.advertising[idx] |=
7951 (ADVERTISED_100baseT_Full |
f85582f8
DK
7952 ADVERTISED_TP);
7953 } else {
7954 BNX2X_ERROR("NVRAM config error. "
7955 "Invalid link_config 0x%x"
7956 " speed_cap_mask 0x%x\n",
7957 link_config,
7958 bp->link_params.speed_cap_mask[idx]);
7959 return;
7960 }
7961 break;
a2fbb9ea 7962
f85582f8
DK
7963 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7964 if (bp->port.supported[idx] &
7965 SUPPORTED_100baseT_Half) {
7966 bp->link_params.req_line_speed[idx] =
7967 SPEED_100;
7968 bp->link_params.req_duplex[idx] =
7969 DUPLEX_HALF;
a22f0788
YR
7970 bp->port.advertising[idx] |=
7971 (ADVERTISED_100baseT_Half |
f85582f8
DK
7972 ADVERTISED_TP);
7973 } else {
7974 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7975 "Invalid link_config 0x%x"
7976 " speed_cap_mask 0x%x\n",
a22f0788
YR
7977 link_config,
7978 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7979 return;
7980 }
7981 break;
a2fbb9ea 7982
f85582f8 7983 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
7984 if (bp->port.supported[idx] &
7985 SUPPORTED_1000baseT_Full) {
7986 bp->link_params.req_line_speed[idx] =
7987 SPEED_1000;
7988 bp->port.advertising[idx] |=
7989 (ADVERTISED_1000baseT_Full |
f85582f8
DK
7990 ADVERTISED_TP);
7991 } else {
7992 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
7993 "Invalid link_config 0x%x"
7994 " speed_cap_mask 0x%x\n",
a22f0788
YR
7995 link_config,
7996 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
7997 return;
7998 }
7999 break;
a2fbb9ea 8000
f85582f8 8001 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
8002 if (bp->port.supported[idx] &
8003 SUPPORTED_2500baseX_Full) {
8004 bp->link_params.req_line_speed[idx] =
8005 SPEED_2500;
8006 bp->port.advertising[idx] |=
8007 (ADVERTISED_2500baseX_Full |
34f80b04 8008 ADVERTISED_TP);
f85582f8
DK
8009 } else {
8010 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8011 "Invalid link_config 0x%x"
8012 " speed_cap_mask 0x%x\n",
a22f0788 8013 link_config,
f85582f8
DK
8014 bp->link_params.speed_cap_mask[idx]);
8015 return;
8016 }
8017 break;
a2fbb9ea 8018
f85582f8
DK
8019 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8020 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8021 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
8022 if (bp->port.supported[idx] &
8023 SUPPORTED_10000baseT_Full) {
8024 bp->link_params.req_line_speed[idx] =
8025 SPEED_10000;
8026 bp->port.advertising[idx] |=
8027 (ADVERTISED_10000baseT_Full |
34f80b04 8028 ADVERTISED_FIBRE);
f85582f8
DK
8029 } else {
8030 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8031 "Invalid link_config 0x%x"
8032 " speed_cap_mask 0x%x\n",
a22f0788 8033 link_config,
f85582f8
DK
8034 bp->link_params.speed_cap_mask[idx]);
8035 return;
8036 }
8037 break;
a2fbb9ea 8038
f85582f8
DK
8039 default:
8040 BNX2X_ERROR("NVRAM config error. "
8041 "BAD link speed link_config 0x%x\n",
8042 link_config);
8043 bp->link_params.req_line_speed[idx] =
8044 SPEED_AUTO_NEG;
8045 bp->port.advertising[idx] =
8046 bp->port.supported[idx];
8047 break;
8048 }
a2fbb9ea 8049
a22f0788 8050 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8051 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
8052 if ((bp->link_params.req_flow_ctrl[idx] ==
8053 BNX2X_FLOW_CTRL_AUTO) &&
8054 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8055 bp->link_params.req_flow_ctrl[idx] =
8056 BNX2X_FLOW_CTRL_NONE;
8057 }
a2fbb9ea 8058
a22f0788
YR
8059 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8060 " 0x%x advertising 0x%x\n",
8061 bp->link_params.req_line_speed[idx],
8062 bp->link_params.req_duplex[idx],
8063 bp->link_params.req_flow_ctrl[idx],
8064 bp->port.advertising[idx]);
8065 }
a2fbb9ea
ET
8066}
8067
e665bfda
MC
8068static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8069{
8070 mac_hi = cpu_to_be16(mac_hi);
8071 mac_lo = cpu_to_be32(mac_lo);
8072 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8073 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8074}
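/* mac_buf ends up in network byte order: bytes 0-1 come from mac_hi and
 * bytes 2-5 from mac_lo, matching the shmem mac_upper/mac_lower split.
 */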
8075
34f80b04 8076static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8077{
34f80b04
EG
8078 int port = BP_PORT(bp);
8079 u32 val, val2;
589abe3a 8080 u32 config;
6f38ad93 8081 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8082
c18487ee 8083 bp->link_params.bp = bp;
34f80b04 8084 bp->link_params.port = port;
c18487ee 8085
c18487ee 8086 bp->link_params.lane_config =
a2fbb9ea 8087 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8088
a22f0788 8089 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
8090 SHMEM_RD(bp,
8091 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
8092 bp->link_params.speed_cap_mask[1] =
8093 SHMEM_RD(bp,
8094 dev_info.port_hw_config[port].speed_capability_mask2);
8095 bp->port.link_config[0] =
a2fbb9ea
ET
8096 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8097
a22f0788
YR
8098 bp->port.link_config[1] =
8099 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8100
a22f0788
YR
8101 bp->link_params.multi_phy_config =
8102 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
8103 /* If the device is capable of WoL, set the default state according
8104 * to the HW
8105 */
4d295db0 8106 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8107 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8108 (config & PORT_FEATURE_WOL_ENABLED));
8109
f85582f8 8110 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8111 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8112 bp->link_params.lane_config,
a22f0788
YR
8113 bp->link_params.speed_cap_mask[0],
8114 bp->port.link_config[0]);
a2fbb9ea 8115
a22f0788 8116 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8117 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8118 bnx2x_phy_probe(&bp->link_params);
c18487ee 8119 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8120
8121 bnx2x_link_settings_requested(bp);
8122
01cd4528
EG
8123 /*
8124 * If connected directly, work with the internal PHY, otherwise, work
8125 * with the external PHY
8126 */
b7737c9b
YR
8127 ext_phy_config =
8128 SHMEM_RD(bp,
8129 dev_info.port_hw_config[port].external_phy_config);
8130 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8131 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8132 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
8133
8134 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8135 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8136 bp->mdio.prtad =
b7737c9b 8137 XGXS_EXT_PHY_ADDR(ext_phy_config);
01cd4528 8138
a2fbb9ea
ET
8139 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8140 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8141 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8142 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8143 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
8144
8145#ifdef BCM_CNIC
8146 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8147 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8148 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8149#endif
34f80b04
EG
8150}
8151
8152static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8153{
f2e0899f
DK
8154 int func = BP_ABS_FUNC(bp);
8155 int vn;
34f80b04
EG
8156 u32 val, val2;
8157 int rc = 0;
a2fbb9ea 8158
34f80b04 8159 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8160
f2e0899f
DK
8161 if (CHIP_IS_E1x(bp)) {
8162 bp->common.int_block = INT_BLOCK_HC;
8163
8164 bp->igu_dsb_id = DEF_SB_IGU_ID;
8165 bp->igu_base_sb = 0;
8166 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8167 } else {
8168 bp->common.int_block = INT_BLOCK_IGU;
8169 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8170 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8171 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8172 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8173 } else
8174 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8175
f2e0899f
DK
8176 bnx2x_get_igu_cam_info(bp);
8177
8178 }
8179 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8180 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8181
8182 /*
8183 * Initialize MF configuration
8184 */
523224a3 8185
fb3bff17
DK
8186 bp->mf_ov = 0;
8187 bp->mf_mode = 0;
f2e0899f
DK
8188 vn = BP_E1HVN(bp);
8189 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8190 if (SHMEM2_HAS(bp, mf_cfg_addr))
8191 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8192 else
8193 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8194 offsetof(struct shmem_region, func_mb) +
8195 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
f2e0899f 8196 bp->mf_config[vn] =
523224a3 8197 MF_CFG_RD(bp, func_mf_config[func].config);
a2fbb9ea 8198
523224a3 8199 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8200 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8201 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
fb3bff17 8202 bp->mf_mode = 1;
2691d51d 8203 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8204 IS_MF(bp) ? "multi" : "single");
2691d51d 8205
fb3bff17 8206 if (IS_MF(bp)) {
523224a3 8207 val = (MF_CFG_RD(bp, func_mf_config[func].
2691d51d
EG
8208 e1hov_tag) &
8209 FUNC_MF_CFG_E1HOV_TAG_MASK);
8210 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8211 bp->mf_ov = val;
f2e0899f 8212 BNX2X_DEV_INFO("MF OV for func %d is %d "
2691d51d 8213 "(0x%04x)\n",
fb3bff17 8214 func, bp->mf_ov, bp->mf_ov);
2691d51d 8215 } else {
f2e0899f 8216 BNX2X_ERROR("No valid MF OV for func %d,"
cdaa7cb8 8217 " aborting\n", func);
34f80b04
EG
8218 rc = -EPERM;
8219 }
2691d51d 8220 } else {
f2e0899f 8221 if (BP_VN(bp)) {
cdaa7cb8
VZ
8222 BNX2X_ERROR("VN %d in single function mode,"
8223 " aborting\n", BP_E1HVN(bp));
2691d51d
EG
8224 rc = -EPERM;
8225 }
34f80b04
EG
8226 }
8227 }
a2fbb9ea 8228
f2e0899f
DK
8229 /* adjust igu_sb_cnt to MF for E1x */
8230 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8231 bp->igu_sb_cnt /= E1HVN_MAX;
8232
f2e0899f
DK
8233 /*
8234 * adjust the E2 sb count: to be removed when the FW supports
8235 * more than 16 L2 clients
8236 */
8237#define MAX_L2_CLIENTS 16
8238 if (CHIP_IS_E2(bp))
8239 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8240 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8241
34f80b04
EG
8242 if (!BP_NOMCP(bp)) {
8243 bnx2x_get_port_hwinfo(bp);
8244
f2e0899f
DK
8245 bp->fw_seq =
8246 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8247 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8248 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8249 }
8250
fb3bff17 8251 if (IS_MF(bp)) {
523224a3
DK
8252 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8253 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
34f80b04
EG
8254 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8255 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8256 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8257 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8258 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8259 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8260 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8261 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8262 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8263 ETH_ALEN);
8264 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8265 ETH_ALEN);
a2fbb9ea 8266 }
34f80b04
EG
8267
8268 return rc;
a2fbb9ea
ET
8269 }
8270
34f80b04
EG
8271 if (BP_NOMCP(bp)) {
8272 /* only supposed to happen on emulation/FPGA */
cdaa7cb8 8273 BNX2X_ERROR("warning: random MAC workaround active\n");
34f80b04
EG
8274 random_ether_addr(bp->dev->dev_addr);
8275 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8276 }
a2fbb9ea 8277
34f80b04
EG
8278 return rc;
8279}
8280
34f24c7f
VZ
8281static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8282{
8283 int cnt, i, block_end, rodi;
8284 char vpd_data[BNX2X_VPD_LEN+1];
8285 char str_id_reg[VENDOR_ID_LEN+1];
8286 char str_id_cap[VENDOR_ID_LEN+1];
8287 u8 len;
8288
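 /* Walk the PCI VPD: locate the read-only section, check the MFR_ID
 * keyword for a Dell vendor id and, if it matches, copy the VENDOR0
 * string into bp->fw_ver for display. */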
8289 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8290 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8291
8292 if (cnt < BNX2X_VPD_LEN)
8293 goto out_not_found;
8294
8295 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8296 PCI_VPD_LRDT_RO_DATA);
8297 if (i < 0)
8298 goto out_not_found;
8299
8300
8301 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8302 pci_vpd_lrdt_size(&vpd_data[i]);
8303
8304 i += PCI_VPD_LRDT_TAG_SIZE;
8305
8306 if (block_end > BNX2X_VPD_LEN)
8307 goto out_not_found;
8308
8309 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8310 PCI_VPD_RO_KEYWORD_MFR_ID);
8311 if (rodi < 0)
8312 goto out_not_found;
8313
8314 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8315
8316 if (len != VENDOR_ID_LEN)
8317 goto out_not_found;
8318
8319 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8320
8321 /* vendor specific info */
8322 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8323 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8324 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8325 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8326
8327 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8328 PCI_VPD_RO_KEYWORD_VENDOR0);
8329 if (rodi >= 0) {
8330 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8331
8332 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8333
8334 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8335 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8336 bp->fw_ver[len] = ' ';
8337 }
8338 }
8339 return;
8340 }
8341out_not_found:
8342 return;
8343}
8344
34f80b04
EG
8345static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8346{
f2e0899f 8347 int func;
87942b46 8348 int timer_interval;
34f80b04
EG
8349 int rc;
8350
da5a662a
VZ
8351 /* Disable interrupt handling until HW is initialized */
8352 atomic_set(&bp->intr_sem, 1);
e1510706 8353 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8354
34f80b04 8355 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8356 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8357 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
8358#ifdef BCM_CNIC
8359 mutex_init(&bp->cnic_mutex);
8360#endif
a2fbb9ea 8361
1cf167f2 8362 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8363 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
8364
8365 rc = bnx2x_get_hwinfo(bp);
8366
523224a3
DK
8367 if (!rc)
8368 rc = bnx2x_alloc_mem_bp(bp);
8369
34f24c7f 8370 bnx2x_read_fwinfo(bp);
f2e0899f
DK
8371
8372 func = BP_FUNC(bp);
8373
34f80b04
EG
8374 /* need to reset chip if undi was active */
8375 if (!BP_NOMCP(bp))
8376 bnx2x_undi_unload(bp);
8377
8378 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8379 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
8380
8381 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
8382 dev_err(&bp->pdev->dev, "MCP disabled, "
8383 "must load devices in order!\n");
34f80b04 8384
555f6c78 8385 /* Set multi queue mode */
8badd27a
EG
8386 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8387 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
cdaa7cb8
VZ
8388 dev_err(&bp->pdev->dev, "Multi disabled since the requested "
8389 "int_mode is not MSI-X\n");
555f6c78
EG
8390 multi_mode = ETH_RSS_MODE_DISABLED;
8391 }
8392 bp->multi_mode = multi_mode;
5d7cd496 8393 bp->int_mode = int_mode;
555f6c78 8394
4fd89b7a
DK
8395 bp->dev->features |= NETIF_F_GRO;
8396
7a9b2557
VZ
8397 /* Set TPA flags */
8398 if (disable_tpa) {
8399 bp->flags &= ~TPA_ENABLE_FLAG;
8400 bp->dev->features &= ~NETIF_F_LRO;
8401 } else {
8402 bp->flags |= TPA_ENABLE_FLAG;
8403 bp->dev->features |= NETIF_F_LRO;
8404 }
5d7cd496 8405 bp->disable_tpa = disable_tpa;
7a9b2557 8406
a18f5128
EG
8407 if (CHIP_IS_E1(bp))
8408 bp->dropless_fc = 0;
8409 else
8410 bp->dropless_fc = dropless_fc;
8411
8d5726c4 8412 bp->mrrs = mrrs;
7a9b2557 8413
34f80b04 8414 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04
EG
8415
8416 bp->rx_csum = 1;
34f80b04 8417
7d323bfd 8418 /* make sure that the numbers are in the right granularity */
523224a3
DK
8419 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8420 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8421
87942b46
EG
8422 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8423 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8424
8425 init_timer(&bp->timer);
8426 bp->timer.expires = jiffies + bp->current_interval;
8427 bp->timer.data = (unsigned long) bp;
8428 bp->timer.function = bnx2x_timer;
8429
8430 return rc;
a2fbb9ea
ET
8431}
8432
a2fbb9ea 8433
de0c62db
DK
8434/****************************************************************************
8435* General service functions
8436****************************************************************************/
a2fbb9ea 8437
bb2a0f7a 8438/* called with rtnl_lock */
a2fbb9ea
ET
8439static int bnx2x_open(struct net_device *dev)
8440{
8441 struct bnx2x *bp = netdev_priv(dev);
8442
6eccabb3
EG
8443 netif_carrier_off(dev);
8444
a2fbb9ea
ET
8445 bnx2x_set_power_state(bp, PCI_D0);
8446
72fd0718
VZ
8447 if (!bnx2x_reset_is_done(bp)) {
8448 do {
8449 /* Reset the MCP mailbox sequence if there is an ongoing
8450 * recovery
8451 */
8452 bp->fw_seq = 0;
8453
8454 /* If it's the first function to load and "reset done"
8455 * is still not cleared, it may mean that a previous
8456 * recovery never completed. We don't check the attention
8457 * state here because it may have already been cleared by
8458 * a "common" reset, but we shall proceed with "process kill" anyway.
8459 */
8460 if ((bnx2x_get_load_cnt(bp) == 0) &&
8461 bnx2x_trylock_hw_lock(bp,
8462 HW_LOCK_RESOURCE_RESERVED_08) &&
8463 (!bnx2x_leader_reset(bp))) {
8464 DP(NETIF_MSG_HW, "Recovered in open\n");
8465 break;
8466 }
8467
8468 bnx2x_set_power_state(bp, PCI_D3hot);
8469
8470 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
8471 " completed yet. Try again later. If you still see this"
8472 " message after a few retries then power cycle is"
8473 " required.\n", bp->dev->name);
8474
8475 return -EAGAIN;
8476 } while (0);
8477 }
8478
8479 bp->recovery_state = BNX2X_RECOVERY_DONE;
8480
bb2a0f7a 8481 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
8482}
8483
bb2a0f7a 8484/* called with rtnl_lock */
a2fbb9ea
ET
8485static int bnx2x_close(struct net_device *dev)
8486{
a2fbb9ea
ET
8487 struct bnx2x *bp = netdev_priv(dev);
8488
8489 /* Unload the driver, release IRQs */
bb2a0f7a 8490 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8491 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
8492
8493 return 0;
8494}
8495
f5372251 8496/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 8497void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
8498{
8499 struct bnx2x *bp = netdev_priv(dev);
8500 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8501 int port = BP_PORT(bp);
8502
8503 if (bp->state != BNX2X_STATE_OPEN) {
8504 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8505 return;
8506 }
8507
8508 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8509
8510 if (dev->flags & IFF_PROMISC)
8511 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04 8512 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
8513 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8514 CHIP_IS_E1(bp)))
34f80b04 8515 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04
EG
8516 else { /* some multicasts */
8517 if (CHIP_IS_E1(bp)) {
523224a3
DK
8518 /*
8519 * set mc list, do not wait as wait implies sleep
8520 * and set_rx_mode can be invoked from non-sleepable
8521 * context
8522 */
8523 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8524 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8525 BNX2X_MAX_MULTICAST*(1 + port));
e665bfda 8526
523224a3 8527 bnx2x_set_e1_mc_list(bp, offset);
34f80b04
EG
8528 } else { /* E1H */
8529 /* Accept one or more multicasts */
22bedad3 8530 struct netdev_hw_addr *ha;
34f80b04
EG
8531 u32 mc_filter[MC_HASH_SIZE];
8532 u32 crc, bit, regidx;
8533 int i;
8534
8535 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8536
22bedad3 8537 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 8538 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
523224a3 8539 bnx2x_mc_addr(ha));
34f80b04 8540
523224a3
DK
8541 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8542 ETH_ALEN);
34f80b04
EG
8543 bit = (crc >> 24) & 0xff;
8544 regidx = bit >> 5;
8545 bit &= 0x1f;
8546 mc_filter[regidx] |= (1 << bit);
8547 }
8548
8549 for (i = 0; i < MC_HASH_SIZE; i++)
8550 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8551 mc_filter[i]);
8552 }
8553 }
8554
8555 bp->rx_mode = rx_mode;
8556 bnx2x_set_storm_rx_mode(bp);
8557}
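/* Illustrative sketch (not part of the driver): how the E1H branch above
 * maps one multicast MAC into the 8-word MC hash table. Values are made up.
 *
 *	crc    = crc32c_le(0, mac, ETH_ALEN);	e.g. crc    = 0xd3a4b1c2
 *	bit    = (crc >> 24) & 0xff;	             bit    = 0xd3 (211)
 *	regidx = bit >> 5;	                     regidx = 6
 *	bit   &= 0x1f;	                             bit    = 19
 *	mc_filter[6] |= (1 << 19);
 *
 * i.e. the top CRC byte selects one of 256 bins spread across the eight
 * 32-bit MC_HASH registers.
 */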
8558
c18487ee 8559/* called with rtnl_lock */
01cd4528
EG
8560static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8561 int devad, u16 addr)
a2fbb9ea 8562{
01cd4528
EG
8563 struct bnx2x *bp = netdev_priv(netdev);
8564 u16 value;
8565 int rc;
a2fbb9ea 8566
01cd4528
EG
8567 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8568 prtad, devad, addr);
a2fbb9ea 8569
01cd4528
EG
8570 /* The HW expects different devad if CL22 is used */
8571 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 8572
01cd4528 8573 bnx2x_acquire_phy_lock(bp);
e10bc84d 8574 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
8575 bnx2x_release_phy_lock(bp);
8576 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 8577
01cd4528
EG
8578 if (!rc)
8579 rc = value;
8580 return rc;
8581}
a2fbb9ea 8582
01cd4528
EG
8583/* called with rtnl_lock */
8584static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8585 u16 addr, u16 value)
8586{
8587 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
8588 int rc;
8589
8590 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8591 " value 0x%x\n", prtad, devad, addr, value);
8592
01cd4528
EG
8593 /* The HW expects different devad if CL22 is used */
8594 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 8595
01cd4528 8596 bnx2x_acquire_phy_lock(bp);
e10bc84d 8597 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
8598 bnx2x_release_phy_lock(bp);
8599 return rc;
8600}
c18487ee 8601
01cd4528
EG
8602/* called with rtnl_lock */
8603static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8604{
8605 struct bnx2x *bp = netdev_priv(dev);
8606 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 8607
01cd4528
EG
8608 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8609 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 8610
01cd4528
EG
8611 if (!netif_running(dev))
8612 return -EAGAIN;
8613
8614 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
8615}
8616
257ddbda 8617#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
8618static void poll_bnx2x(struct net_device *dev)
8619{
8620 struct bnx2x *bp = netdev_priv(dev);
8621
8622 disable_irq(bp->pdev->irq);
8623 bnx2x_interrupt(bp->pdev->irq, dev);
8624 enable_irq(bp->pdev->irq);
8625}
8626#endif
8627
c64213cd
SH
8628static const struct net_device_ops bnx2x_netdev_ops = {
8629 .ndo_open = bnx2x_open,
8630 .ndo_stop = bnx2x_close,
8631 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 8632 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
8633 .ndo_set_mac_address = bnx2x_change_mac_addr,
8634 .ndo_validate_addr = eth_validate_addr,
8635 .ndo_do_ioctl = bnx2x_ioctl,
8636 .ndo_change_mtu = bnx2x_change_mtu,
8637 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 8638#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
8639 .ndo_poll_controller = poll_bnx2x,
8640#endif
8641};
8642
34f80b04
EG
8643static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8644 struct net_device *dev)
a2fbb9ea
ET
8645{
8646 struct bnx2x *bp;
8647 int rc;
8648
8649 SET_NETDEV_DEV(dev, &pdev->dev);
8650 bp = netdev_priv(dev);
8651
34f80b04
EG
8652 bp->dev = dev;
8653 bp->pdev = pdev;
a2fbb9ea 8654 bp->flags = 0;
f2e0899f 8655 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
8656
8657 rc = pci_enable_device(pdev);
8658 if (rc) {
cdaa7cb8
VZ
8659 dev_err(&bp->pdev->dev,
8660 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
8661 goto err_out;
8662 }
8663
8664 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
8665 dev_err(&bp->pdev->dev,
8666 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
8667 rc = -ENODEV;
8668 goto err_out_disable;
8669 }
8670
8671 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
8672 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8673 " base address, aborting\n");
a2fbb9ea
ET
8674 rc = -ENODEV;
8675 goto err_out_disable;
8676 }
8677
34f80b04
EG
8678 if (atomic_read(&pdev->enable_cnt) == 1) {
8679 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8680 if (rc) {
cdaa7cb8
VZ
8681 dev_err(&bp->pdev->dev,
8682 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
8683 goto err_out_disable;
8684 }
a2fbb9ea 8685
34f80b04
EG
8686 pci_set_master(pdev);
8687 pci_save_state(pdev);
8688 }
a2fbb9ea
ET
8689
8690 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8691 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
8692 dev_err(&bp->pdev->dev,
8693 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
8694 rc = -EIO;
8695 goto err_out_release;
8696 }
8697
8698 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8699 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
8700 dev_err(&bp->pdev->dev,
8701 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
8702 rc = -EIO;
8703 goto err_out_release;
8704 }
8705
1a983142 8706 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 8707 bp->flags |= USING_DAC_FLAG;
1a983142 8708 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
8709 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8710 " failed, aborting\n");
a2fbb9ea
ET
8711 rc = -EIO;
8712 goto err_out_release;
8713 }
8714
1a983142 8715 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
8716 dev_err(&bp->pdev->dev,
8717 "System does not support DMA, aborting\n");
a2fbb9ea
ET
8718 rc = -EIO;
8719 goto err_out_release;
8720 }
8721
34f80b04
EG
8722 dev->mem_start = pci_resource_start(pdev, 0);
8723 dev->base_addr = dev->mem_start;
8724 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
8725
8726 dev->irq = pdev->irq;
8727
275f165f 8728 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 8729 if (!bp->regview) {
cdaa7cb8
VZ
8730 dev_err(&bp->pdev->dev,
8731 "Cannot map register space, aborting\n");
a2fbb9ea
ET
8732 rc = -ENOMEM;
8733 goto err_out_release;
8734 }
8735
34f80b04 8736 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 8737 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 8738 pci_resource_len(pdev, 2)));
a2fbb9ea 8739 if (!bp->doorbells) {
cdaa7cb8
VZ
8740 dev_err(&bp->pdev->dev,
8741 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
8742 rc = -ENOMEM;
8743 goto err_out_unmap;
8744 }
8745
8746 bnx2x_set_power_state(bp, PCI_D0);
8747
34f80b04
EG
8748 /* clean indirect addresses */
8749 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8750 PCICFG_VENDOR_ID_OFFSET);
8751 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8752 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8753 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8754 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 8755
72fd0718
VZ
8756 /* Reset the load counter */
8757 bnx2x_clear_load_cnt(bp);
8758
34f80b04 8759 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 8760
c64213cd 8761 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 8762 bnx2x_set_ethtool_ops(dev);
34f80b04
EG
8763 dev->features |= NETIF_F_SG;
8764 dev->features |= NETIF_F_HW_CSUM;
8765 if (bp->flags & USING_DAC_FLAG)
8766 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
8767 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8768 dev->features |= NETIF_F_TSO6;
34f80b04 8769 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
5316bc0b
EG
8770
8771 dev->vlan_features |= NETIF_F_SG;
8772 dev->vlan_features |= NETIF_F_HW_CSUM;
8773 if (bp->flags & USING_DAC_FLAG)
8774 dev->vlan_features |= NETIF_F_HIGHDMA;
8775 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8776 dev->vlan_features |= NETIF_F_TSO6;
a2fbb9ea 8777
01cd4528
EG
8778 /* get_port_hwinfo() will set prtad and mmds properly */
8779 bp->mdio.prtad = MDIO_PRTAD_NONE;
8780 bp->mdio.mmds = 0;
8781 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8782 bp->mdio.dev = dev;
8783 bp->mdio.mdio_read = bnx2x_mdio_read;
8784 bp->mdio.mdio_write = bnx2x_mdio_write;
8785
a2fbb9ea
ET
8786 return 0;
8787
8788err_out_unmap:
8789 if (bp->regview) {
8790 iounmap(bp->regview);
8791 bp->regview = NULL;
8792 }
a2fbb9ea
ET
8793 if (bp->doorbells) {
8794 iounmap(bp->doorbells);
8795 bp->doorbells = NULL;
8796 }
8797
8798err_out_release:
34f80b04
EG
8799 if (atomic_read(&pdev->enable_cnt) == 1)
8800 pci_release_regions(pdev);
a2fbb9ea
ET
8801
8802err_out_disable:
8803 pci_disable_device(pdev);
8804 pci_set_drvdata(pdev, NULL);
8805
8806err_out:
8807 return rc;
8808}
8809
37f9ce62
EG
8810static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8811 int *width, int *speed)
25047950
ET
8812{
8813 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8814
37f9ce62 8815 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 8816
37f9ce62
EG
8817 /* return value of 1=2.5GHz 2=5GHz */
8818 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 8819}
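/* Example (register field semantics assumed from the macros above): a x8
 * link trained at Gen2 would decode to *width == 8 and *speed == 2, which
 * bnx2x_init_one() later prints as "PCI-E x8 5GHz (Gen2)".
 */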
37f9ce62 8820
6891dd25 8821static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 8822{
37f9ce62 8823 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
8824 struct bnx2x_fw_file_hdr *fw_hdr;
8825 struct bnx2x_fw_file_section *sections;
94a78b79 8826 u32 offset, len, num_ops;
37f9ce62 8827 u16 *ops_offsets;
94a78b79 8828 int i;
37f9ce62 8829 const u8 *fw_ver;
94a78b79
VZ
8830
8831 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8832 return -EINVAL;
8833
8834 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8835 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8836
8837 /* Make sure none of the offsets and sizes make us read beyond
8838 * the end of the firmware data */
8839 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8840 offset = be32_to_cpu(sections[i].offset);
8841 len = be32_to_cpu(sections[i].len);
8842 if (offset + len > firmware->size) {
cdaa7cb8
VZ
8843 dev_err(&bp->pdev->dev,
8844 "Section %d length is out of bounds\n", i);
94a78b79
VZ
8845 return -EINVAL;
8846 }
8847 }
8848
8849 /* Likewise for the init_ops offsets */
8850 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8851 ops_offsets = (u16 *)(firmware->data + offset);
8852 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8853
8854 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8855 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
8856 dev_err(&bp->pdev->dev,
8857 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
8858 return -EINVAL;
8859 }
8860 }
8861
8862 /* Check FW version */
8863 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8864 fw_ver = firmware->data + offset;
8865 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8866 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8867 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8868 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
8869 dev_err(&bp->pdev->dev,
8870 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
8871 fw_ver[0], fw_ver[1], fw_ver[2],
8872 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8873 BCM_5710_FW_MINOR_VERSION,
8874 BCM_5710_FW_REVISION_VERSION,
8875 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 8876 return -EINVAL;
94a78b79
VZ
8877 }
8878
8879 return 0;
8880}
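/* For reference, the layout validated above: the file starts with a
 * struct bnx2x_fw_file_hdr, which is itself an array of {offset, len}
 * section descriptors (struct bnx2x_fw_file_section) covering the init
 * ops, init ops offsets, init data, the per-STORM int-table and pram
 * blobs, the IRO array and the fw_version bytes checked last.
 */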
8881
ab6ad5a4 8882static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 8883{
ab6ad5a4
EG
8884 const __be32 *source = (const __be32 *)_source;
8885 u32 *target = (u32 *)_target;
94a78b79 8886 u32 i;
94a78b79
VZ
8887
8888 for (i = 0; i < n/4; i++)
8889 target[i] = be32_to_cpu(source[i]);
8890}
8891
8892/*
8893 Ops array is stored in the following format:
8894 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8895 */
ab6ad5a4 8896static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 8897{
ab6ad5a4
EG
8898 const __be32 *source = (const __be32 *)_source;
8899 struct raw_op *target = (struct raw_op *)_target;
94a78b79 8900 u32 i, j, tmp;
94a78b79 8901
ab6ad5a4 8902 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
8903 tmp = be32_to_cpu(source[j]);
8904 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
8905 target[i].offset = tmp & 0xffffff;
8906 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
8907 }
8908}
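/* Worked example (made-up bytes): if the stream holds the big-endian
 * words 0x0500abcd 0x00000001, bnx2x_prep_ops() yields op = 0x05,
 * offset = 0x00abcd and raw_data = 0x00000001.
 */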
ab6ad5a4 8909
523224a3
DK
8910/**
8911 * IRO array is stored in the following format:
8912 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8913 */
8914static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8915{
8916 const __be32 *source = (const __be32 *)_source;
8917 struct iro *target = (struct iro *)_target;
8918 u32 i, j, tmp;
8919
8920 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8921 target[i].base = be32_to_cpu(source[j]);
8922 j++;
8923 tmp = be32_to_cpu(source[j]);
8924 target[i].m1 = (tmp >> 16) & 0xffff;
8925 target[i].m2 = tmp & 0xffff;
8926 j++;
8927 tmp = be32_to_cpu(source[j]);
8928 target[i].m3 = (tmp >> 16) & 0xffff;
8929 target[i].size = tmp & 0xffff;
8930 j++;
8931 }
8932}
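/* Worked example (made-up values): the three big-endian words
 * 0x00012340 0x00080004 0x000c0010 decode to base = 0x12340, m1 = 8,
 * m2 = 4, m3 = 0xc and size = 0x10.
 */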
8933
ab6ad5a4 8934static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 8935{
ab6ad5a4
EG
8936 const __be16 *source = (const __be16 *)_source;
8937 u16 *target = (u16 *)_target;
94a78b79 8938 u32 i;
94a78b79
VZ
8939
8940 for (i = 0; i < n/2; i++)
8941 target[i] = be16_to_cpu(source[i]);
8942}
8943
7995c64e
JP
8944#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8945do { \
8946 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8947 bp->arr = kmalloc(len, GFP_KERNEL); \
8948 if (!bp->arr) { \
8949 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8950 goto lbl; \
8951 } \
8952 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8953 (u8 *)bp->arr, len); \
8954} while (0)
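/* Usage note: the macro assumes fw_hdr and bp are in scope at the
 * expansion site and jumps to the supplied label on allocation failure,
 * e.g. BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) below allocates bp->init_data and byte-swaps the blob
 * from bp->firmware->data into it.
 */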
94a78b79 8955
6891dd25 8956int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 8957{
45229b42 8958 const char *fw_file_name;
94a78b79 8959 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 8960 int rc;
94a78b79 8961
94a78b79 8962 if (CHIP_IS_E1(bp))
45229b42 8963 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 8964 else if (CHIP_IS_E1H(bp))
45229b42 8965 fw_file_name = FW_FILE_NAME_E1H;
f2e0899f
DK
8966 else if (CHIP_IS_E2(bp))
8967 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 8968 else {
6891dd25 8969 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
8970 return -EINVAL;
8971 }
94a78b79 8972
6891dd25 8973 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 8974
6891dd25 8975 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 8976 if (rc) {
6891dd25 8977 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
8978 goto request_firmware_exit;
8979 }
8980
8981 rc = bnx2x_check_firmware(bp);
8982 if (rc) {
6891dd25 8983 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
8984 goto request_firmware_exit;
8985 }
8986
8987 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8988
8989 /* Initialize the pointers to the init arrays */
8990 /* Blob */
8991 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8992
8993 /* Opcodes */
8994 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8995
8996 /* Offsets */
ab6ad5a4
EG
8997 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8998 be16_to_cpu_n);
94a78b79
VZ
8999
9000 /* STORMs firmware */
573f2035
EG
9001 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9002 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9003 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9004 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9005 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9006 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9007 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9008 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9009 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9010 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9011 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9012 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9013 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9014 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9015 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9016 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
9017 /* IRO */
9018 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
9019
9020 return 0;
ab6ad5a4 9021
523224a3
DK
9022iro_alloc_err:
9023 kfree(bp->init_ops_offsets);
94a78b79
VZ
9024init_offsets_alloc_err:
9025 kfree(bp->init_ops);
9026init_ops_alloc_err:
9027 kfree(bp->init_data);
9028request_firmware_exit:
9029 release_firmware(bp->firmware);
9030
9031 return rc;
9032}
9033
523224a3
DK
9034static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9035{
9036 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9037
523224a3
DK
9038#ifdef BCM_CNIC
9039 cid_count += CNIC_CID_MAX;
9040#endif
9041 return roundup(cid_count, QM_CID_ROUND);
9042}
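/* Worked example (QM_CID_ROUND value assumed, see bnx2x.h): if the L2
 * plus CNIC CID count came to 1040 and QM_CID_ROUND were 1024, the
 * function would return 2048; the QM always gets a rounded CID count.
 */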
f85582f8 9043
a2fbb9ea
ET
9044static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9045 const struct pci_device_id *ent)
9046{
a2fbb9ea
ET
9047 struct net_device *dev = NULL;
9048 struct bnx2x *bp;
37f9ce62 9049 int pcie_width, pcie_speed;
523224a3
DK
9050 int rc, cid_count;
9051
f2e0899f
DK
9052 switch (ent->driver_data) {
9053 case BCM57710:
9054 case BCM57711:
9055 case BCM57711E:
9056 cid_count = FP_SB_MAX_E1x;
9057 break;
9058
9059 case BCM57712:
9060 case BCM57712E:
9061 cid_count = FP_SB_MAX_E2;
9062 break;
a2fbb9ea 9063
f2e0899f
DK
9064 default:
9065 pr_err("Unknown board_type (%ld), aborting\n",
9066 ent->driver_data);
870634b0 9067 return -ENODEV;
f2e0899f
DK
9068 }
9069
9070 cid_count += CNIC_CONTEXT_USE;
f85582f8 9071
a2fbb9ea 9072 /* dev zeroed in init_etherdev */
523224a3 9073 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9074 if (!dev) {
cdaa7cb8 9075 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9076 return -ENOMEM;
34f80b04 9077 }
a2fbb9ea 9078
a2fbb9ea 9079 bp = netdev_priv(dev);
7995c64e 9080 bp->msg_enable = debug;
a2fbb9ea 9081
df4770de
EG
9082 pci_set_drvdata(pdev, dev);
9083
523224a3
DK
9084 bp->l2_cid_count = cid_count;
9085
34f80b04 9086 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
9087 if (rc < 0) {
9088 free_netdev(dev);
9089 return rc;
9090 }
9091
34f80b04 9092 rc = bnx2x_init_bp(bp);
693fc0d1
EG
9093 if (rc)
9094 goto init_one_exit;
9095
523224a3
DK
9096 /* calc qm_cid_count */
9097 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9098
d6214d7a
DK
9099 /* Configure interrupt mode: try to enable MSI-X/MSI if
9100 * needed, set bp->num_queues appropriately.
9101 */
9102 bnx2x_set_int_mode(bp);
9103
9104 /* Add all NAPI objects */
9105 bnx2x_add_all_napi(bp);
9106
b340007f
VZ
9107 rc = register_netdev(dev);
9108 if (rc) {
9109 dev_err(&pdev->dev, "Cannot register net device\n");
9110 goto init_one_exit;
9111 }
9112
37f9ce62 9113 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9114
cdaa7cb8
VZ
9115 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9116 " IRQ %d, ", board_info[ent->driver_data].name,
9117 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
f2e0899f
DK
9118 pcie_width,
9119 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9120 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9121 "5GHz (Gen2)" : "2.5GHz",
cdaa7cb8
VZ
9122 dev->base_addr, bp->pdev->irq);
9123 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9124
a2fbb9ea 9125 return 0;
34f80b04
EG
9126
9127init_one_exit:
9128 if (bp->regview)
9129 iounmap(bp->regview);
9130
9131 if (bp->doorbells)
9132 iounmap(bp->doorbells);
9133
9134 free_netdev(dev);
9135
9136 if (atomic_read(&pdev->enable_cnt) == 1)
9137 pci_release_regions(pdev);
9138
9139 pci_disable_device(pdev);
9140 pci_set_drvdata(pdev, NULL);
9141
9142 return rc;
a2fbb9ea
ET
9143}
9144
9145static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9146{
9147 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
9148 struct bnx2x *bp;
9149
9150 if (!dev) {
cdaa7cb8 9151 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
9152 return;
9153 }
228241eb 9154 bp = netdev_priv(dev);
a2fbb9ea 9155
a2fbb9ea
ET
9156 unregister_netdev(dev);
9157
d6214d7a
DK
9158 /* Delete all NAPI objects */
9159 bnx2x_del_all_napi(bp);
9160
9161 /* Disable MSI/MSI-X */
9162 bnx2x_disable_msi(bp);
f85582f8 9163
72fd0718
VZ
9164 /* Make sure RESET task is not scheduled before continuing */
9165 cancel_delayed_work_sync(&bp->reset_task);
9166
a2fbb9ea
ET
9167 if (bp->regview)
9168 iounmap(bp->regview);
9169
9170 if (bp->doorbells)
9171 iounmap(bp->doorbells);
9172
523224a3
DK
9173 bnx2x_free_mem_bp(bp);
9174
a2fbb9ea 9175 free_netdev(dev);
34f80b04
EG
9176
9177 if (atomic_read(&pdev->enable_cnt) == 1)
9178 pci_release_regions(pdev);
9179
a2fbb9ea
ET
9180 pci_disable_device(pdev);
9181 pci_set_drvdata(pdev, NULL);
9182}
9183
f8ef6e44
YG
9184static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9185{
9186 int i;
9187
9188 bp->state = BNX2X_STATE_ERROR;
9189
9190 bp->rx_mode = BNX2X_RX_MODE_NONE;
9191
9192 bnx2x_netif_stop(bp, 0);
c89af1a3 9193 netif_carrier_off(bp->dev);
f8ef6e44
YG
9194
9195 del_timer_sync(&bp->timer);
9196 bp->stats_state = STATS_STATE_DISABLED;
9197 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9198
9199 /* Release IRQs */
d6214d7a 9200 bnx2x_free_irq(bp);
f8ef6e44 9201
f8ef6e44
YG
9202 /* Free SKBs, SGEs, TPA pool and driver internals */
9203 bnx2x_free_skbs(bp);
523224a3 9204
54b9ddaa 9205 for_each_queue(bp, i)
f8ef6e44 9206 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9207
f8ef6e44
YG
9208 bnx2x_free_mem(bp);
9209
9210 bp->state = BNX2X_STATE_CLOSED;
9211
f8ef6e44
YG
9212 return 0;
9213}
9214
9215static void bnx2x_eeh_recover(struct bnx2x *bp)
9216{
9217 u32 val;
9218
9219 mutex_init(&bp->port.phy_mutex);
9220
9221 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9222 bp->link_params.shmem_base = bp->common.shmem_base;
9223 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9224
9225 if (!bp->common.shmem_base ||
9226 (bp->common.shmem_base < 0xA0000) ||
9227 (bp->common.shmem_base >= 0xC0000)) {
9228 BNX2X_DEV_INFO("MCP not active\n");
9229 bp->flags |= NO_MCP_FLAG;
9230 return;
9231 }
9232
9233 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9234 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9235 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9236 BNX2X_ERR("BAD MCP validity signature\n");
9237
9238 if (!BP_NOMCP(bp)) {
f2e0899f
DK
9239 bp->fw_seq =
9240 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9241 DRV_MSG_SEQ_NUMBER_MASK);
f8ef6e44
YG
9242 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9243 }
9244}
9245
493adb1f
WX
9246/**
9247 * bnx2x_io_error_detected - called when PCI error is detected
9248 * @pdev: Pointer to PCI device
9249 * @state: The current pci connection state
9250 *
9251 * This function is called after a PCI bus error affecting
9252 * this device has been detected.
9253 */
9254static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9255 pci_channel_state_t state)
9256{
9257 struct net_device *dev = pci_get_drvdata(pdev);
9258 struct bnx2x *bp = netdev_priv(dev);
9259
9260 rtnl_lock();
9261
9262 netif_device_detach(dev);
9263
07ce50e4
DN
9264 if (state == pci_channel_io_perm_failure) {
9265 rtnl_unlock();
9266 return PCI_ERS_RESULT_DISCONNECT;
9267 }
9268
493adb1f 9269 if (netif_running(dev))
f8ef6e44 9270 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
9271
9272 pci_disable_device(pdev);
9273
9274 rtnl_unlock();
9275
9276 /* Request a slot reset */
9277 return PCI_ERS_RESULT_NEED_RESET;
9278}
9279
9280/**
9281 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9282 * @pdev: Pointer to PCI device
9283 *
9284 * Restart the card from scratch, as if from a cold-boot.
9285 */
9286static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9287{
9288 struct net_device *dev = pci_get_drvdata(pdev);
9289 struct bnx2x *bp = netdev_priv(dev);
9290
9291 rtnl_lock();
9292
9293 if (pci_enable_device(pdev)) {
9294 dev_err(&pdev->dev,
9295 "Cannot re-enable PCI device after reset\n");
9296 rtnl_unlock();
9297 return PCI_ERS_RESULT_DISCONNECT;
9298 }
9299
9300 pci_set_master(pdev);
9301 pci_restore_state(pdev);
9302
9303 if (netif_running(dev))
9304 bnx2x_set_power_state(bp, PCI_D0);
9305
9306 rtnl_unlock();
9307
9308 return PCI_ERS_RESULT_RECOVERED;
9309}
9310
9311/**
9312 * bnx2x_io_resume - called when traffic can start flowing again
9313 * @pdev: Pointer to PCI device
9314 *
9315 * This callback is called when the error recovery driver tells us that
9316 * it's OK to resume normal operation.
9317 */
9318static void bnx2x_io_resume(struct pci_dev *pdev)
9319{
9320 struct net_device *dev = pci_get_drvdata(pdev);
9321 struct bnx2x *bp = netdev_priv(dev);
9322
72fd0718 9323 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f2e0899f
DK
9324 printk(KERN_ERR "Handling parity error recovery. "
9325 "Try again later\n");
72fd0718
VZ
9326 return;
9327 }
9328
493adb1f
WX
9329 rtnl_lock();
9330
f8ef6e44
YG
9331 bnx2x_eeh_recover(bp);
9332
493adb1f 9333 if (netif_running(dev))
f8ef6e44 9334 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
9335
9336 netif_device_attach(dev);
9337
9338 rtnl_unlock();
9339}
9340
9341static struct pci_error_handlers bnx2x_err_handler = {
9342 .error_detected = bnx2x_io_error_detected,
356e2385
EG
9343 .slot_reset = bnx2x_io_slot_reset,
9344 .resume = bnx2x_io_resume,
493adb1f
WX
9345};
9346
a2fbb9ea 9347static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
9348 .name = DRV_MODULE_NAME,
9349 .id_table = bnx2x_pci_tbl,
9350 .probe = bnx2x_init_one,
9351 .remove = __devexit_p(bnx2x_remove_one),
9352 .suspend = bnx2x_suspend,
9353 .resume = bnx2x_resume,
9354 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
9355};
9356
9357static int __init bnx2x_init(void)
9358{
dd21ca6d
SG
9359 int ret;
9360
7995c64e 9361 pr_info("%s", version);
938cf541 9362
1cf167f2
EG
9363 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9364 if (bnx2x_wq == NULL) {
7995c64e 9365 pr_err("Cannot create workqueue\n");
1cf167f2
EG
9366 return -ENOMEM;
9367 }
9368
dd21ca6d
SG
9369 ret = pci_register_driver(&bnx2x_pci_driver);
9370 if (ret) {
7995c64e 9371 pr_err("Cannot register driver\n");
dd21ca6d
SG
9372 destroy_workqueue(bnx2x_wq);
9373 }
9374 return ret;
a2fbb9ea
ET
9375}
9376
9377static void __exit bnx2x_cleanup(void)
9378{
9379 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
9380
9381 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
9382}
9383
9384module_init(bnx2x_init);
9385module_exit(bnx2x_cleanup);
9386
993ac7b5
MC
9387#ifdef BCM_CNIC
9388
9389/* count denotes the number of new completions we have seen */
9390static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9391{
9392 struct eth_spe *spe;
9393
9394#ifdef BNX2X_STOP_ON_ERROR
9395 if (unlikely(bp->panic))
9396 return;
9397#endif
9398
9399 spin_lock_bh(&bp->spq_lock);
c2bff63f 9400 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
9401 bp->cnic_spq_pending -= count;
9402
993ac7b5 9403
c2bff63f
DK
9404 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9405 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9406 & SPE_HDR_CONN_TYPE) >>
9407 SPE_HDR_CONN_TYPE_SHIFT;
9408
9409 /* Set validation for iSCSI L2 client before sending SETUP
9410 * ramrod
9411 */
9412 if (type == ETH_CONNECTION_TYPE) {
9413 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9414 hdr.conn_and_cmd_data) >>
9415 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9416
9417 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9418 bnx2x_set_ctx_validation(&bp->context.
9419 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9420 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9421 }
9422
9423 /* There may be no more than 8 L2 and COMMON SPEs and no
9424 * more than 8 L5 SPEs in flight at any time.
9425 */
9426 if ((type == NONE_CONNECTION_TYPE) ||
9427 (type == ETH_CONNECTION_TYPE)) {
9428 if (!atomic_read(&bp->spq_left))
9429 break;
9430 else
9431 atomic_dec(&bp->spq_left);
9432 } else if (type == ISCSI_CONNECTION_TYPE) {
9433 if (bp->cnic_spq_pending >=
9434 bp->cnic_eth_dev.max_kwqe_pending)
9435 break;
9436 else
9437 bp->cnic_spq_pending++;
9438 } else {
9439 BNX2X_ERR("Unknown SPE type: %d\n", type);
9440 bnx2x_panic();
993ac7b5 9441 break;
c2bff63f 9442 }
993ac7b5
MC
9443
9444 spe = bnx2x_sp_get_next(bp);
9445 *spe = *bp->cnic_kwq_cons;
9446
993ac7b5
MC
9447 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9448 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9449
9450 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9451 bp->cnic_kwq_cons = bp->cnic_kwq;
9452 else
9453 bp->cnic_kwq_cons++;
9454 }
9455 bnx2x_sp_prod_update(bp);
9456 spin_unlock_bh(&bp->spq_lock);
9457}
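/* Summary of the credit scheme above: L2/COMMON SPEs draw on the shared
 * spq_left budget while iSCSI SPEs are bounded by
 * cnic_eth_dev.max_kwqe_pending; hitting either limit pauses draining of
 * the CNIC kwq ring until further completions return credit.
 */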
9458
9459static int bnx2x_cnic_sp_queue(struct net_device *dev,
9460 struct kwqe_16 *kwqes[], u32 count)
9461{
9462 struct bnx2x *bp = netdev_priv(dev);
9463 int i;
9464
9465#ifdef BNX2X_STOP_ON_ERROR
9466 if (unlikely(bp->panic))
9467 return -EIO;
9468#endif
9469
9470 spin_lock_bh(&bp->spq_lock);
9471
9472 for (i = 0; i < count; i++) {
9473 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9474
9475 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9476 break;
9477
9478 *bp->cnic_kwq_prod = *spe;
9479
9480 bp->cnic_kwq_pending++;
9481
9482 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9483 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
9484 spe->data.update_data_addr.hi,
9485 spe->data.update_data_addr.lo,
993ac7b5
MC
9486 bp->cnic_kwq_pending);
9487
9488 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9489 bp->cnic_kwq_prod = bp->cnic_kwq;
9490 else
9491 bp->cnic_kwq_prod++;
9492 }
9493
9494 spin_unlock_bh(&bp->spq_lock);
9495
9496 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9497 bnx2x_cnic_sp_post(bp, 0);
9498
9499 return i;
9500}
9501
9502static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9503{
9504 struct cnic_ops *c_ops;
9505 int rc = 0;
9506
9507 mutex_lock(&bp->cnic_mutex);
9508 c_ops = bp->cnic_ops;
9509 if (c_ops)
9510 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9511 mutex_unlock(&bp->cnic_mutex);
9512
9513 return rc;
9514}
9515
9516static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9517{
9518 struct cnic_ops *c_ops;
9519 int rc = 0;
9520
9521 rcu_read_lock();
9522 c_ops = rcu_dereference(bp->cnic_ops);
9523 if (c_ops)
9524 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9525 rcu_read_unlock();
9526
9527 return rc;
9528}
9529
9530/*
9531 * for commands that have no data
9532 */
9f6c9258 9533int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
9534{
9535 struct cnic_ctl_info ctl = {0};
9536
9537 ctl.cmd = cmd;
9538
9539 return bnx2x_cnic_ctl_send(bp, &ctl);
9540}
9541
9542static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9543{
9544 struct cnic_ctl_info ctl;
9545
9546 /* first we tell CNIC and only then we count this as a completion */
9547 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9548 ctl.data.comp.cid = cid;
9549
9550 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 9551 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
9552}
9553
9554static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9555{
9556 struct bnx2x *bp = netdev_priv(dev);
9557 int rc = 0;
9558
9559 switch (ctl->cmd) {
9560 case DRV_CTL_CTXTBL_WR_CMD: {
9561 u32 index = ctl->data.io.offset;
9562 dma_addr_t addr = ctl->data.io.dma_addr;
9563
9564 bnx2x_ilt_wr(bp, index, addr);
9565 break;
9566 }
9567
c2bff63f
DK
9568 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9569 int count = ctl->data.credit.credit_count;
993ac7b5
MC
9570
9571 bnx2x_cnic_sp_post(bp, count);
9572 break;
9573 }
9574
9575 /* rtnl_lock is held. */
9576 case DRV_CTL_START_L2_CMD: {
9577 u32 cli = ctl->data.ring.client_id;
9578
523224a3
DK
9579 /* Set iSCSI MAC address */
9580 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9581
9582 mmiowb();
9583 barrier();
9584
9585 /* Start accepting on the iSCSI L2 ring. Accept all
9586 * multicasts because it's the only way for the UIO Client
9587 * to accept them (in non-promiscuous mode only one Client
9588 * per function will receive multicast packets - the
9589 * leading one in our case).
9590 */
9591 bnx2x_rxq_set_mac_filters(bp, cli,
9592 BNX2X_ACCEPT_UNICAST |
9593 BNX2X_ACCEPT_BROADCAST |
9594 BNX2X_ACCEPT_ALL_MULTICAST);
9595 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9596
993ac7b5
MC
9597 break;
9598 }
9599
9600 /* rtnl_lock is held. */
9601 case DRV_CTL_STOP_L2_CMD: {
9602 u32 cli = ctl->data.ring.client_id;
9603
523224a3
DK
9604 /* Stop accepting on iSCSI L2 ring */
9605 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9606 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9607
9608 mmiowb();
9609 barrier();
9610
9611 /* Unset iSCSI L2 MAC */
9612 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
993ac7b5
MC
9613 break;
9614 }
c2bff63f
DK
9615 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9616 int count = ctl->data.credit.credit_count;
9617
9618 smp_mb__before_atomic_inc();
9619 atomic_add(count, &bp->spq_left);
9620 smp_mb__after_atomic_inc();
9621 break;
9622 }
993ac7b5
MC
9623
9624 default:
9625 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9626 rc = -EINVAL;
9627 }
9628
9629 return rc;
9630}
9631
9f6c9258 9632void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
9633{
9634 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9635
9636 if (bp->flags & USING_MSIX_FLAG) {
9637 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9638 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9639 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9640 } else {
9641 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9642 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9643 }
f2e0899f
DK
9644 if (CHIP_IS_E2(bp))
9645 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9646 else
9647 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9648
993ac7b5 9649 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
523224a3 9650 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
993ac7b5
MC
9651 cp->irq_arr[1].status_blk = bp->def_status_blk;
9652 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 9653 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
9654
9655 cp->num_irq = 2;
9656}
9657
9658static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9659 void *data)
9660{
9661 struct bnx2x *bp = netdev_priv(dev);
9662 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9663
9664 if (ops == NULL)
9665 return -EINVAL;
9666
9667 if (atomic_read(&bp->intr_sem) != 0)
9668 return -EBUSY;
9669
9670 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9671 if (!bp->cnic_kwq)
9672 return -ENOMEM;
9673
9674 bp->cnic_kwq_cons = bp->cnic_kwq;
9675 bp->cnic_kwq_prod = bp->cnic_kwq;
9676 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9677
9678 bp->cnic_spq_pending = 0;
9679 bp->cnic_kwq_pending = 0;
9680
9681 bp->cnic_data = data;
9682
9683 cp->num_irq = 0;
9684 cp->drv_state = CNIC_DRV_STATE_REGD;
523224a3 9685 cp->iro_arr = bp->iro_arr;
993ac7b5 9686
993ac7b5 9687 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 9688
993ac7b5
MC
9689 rcu_assign_pointer(bp->cnic_ops, ops);
9690
9691 return 0;
9692}
9693
9694static int bnx2x_unregister_cnic(struct net_device *dev)
9695{
9696 struct bnx2x *bp = netdev_priv(dev);
9697 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9698
9699 mutex_lock(&bp->cnic_mutex);
9700 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9701 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9702 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9703 }
9704 cp->drv_state = 0;
9705 rcu_assign_pointer(bp->cnic_ops, NULL);
9706 mutex_unlock(&bp->cnic_mutex);
9707 synchronize_rcu();
9708 kfree(bp->cnic_kwq);
9709 bp->cnic_kwq = NULL;
9710
9711 return 0;
9712}
9713
9714struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9715{
9716 struct bnx2x *bp = netdev_priv(dev);
9717 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9718
9719 cp->drv_owner = THIS_MODULE;
9720 cp->chip_id = CHIP_ID(bp);
9721 cp->pdev = bp->pdev;
9722 cp->io_base = bp->regview;
9723 cp->io_base2 = bp->doorbells;
9724 cp->max_kwqe_pending = 8;
523224a3 9725 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
c2bff63f
DK
9726 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9727 bnx2x_cid_ilt_lines(bp);
993ac7b5 9728 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 9729 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
993ac7b5
MC
9730 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9731 cp->drv_ctl = bnx2x_drv_ctl;
9732 cp->drv_register_cnic = bnx2x_register_cnic;
9733 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
c2bff63f
DK
9734 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9735 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9736
9737 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9738 "starting cid %d\n",
9739 cp->ctx_blk_size,
9740 cp->ctx_tbl_offset,
9741 cp->ctx_tbl_len,
9742 cp->starting_cid);
993ac7b5
MC
9743 return cp;
9744}
9745EXPORT_SYMBOL(bnx2x_cnic_probe);
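/* Hedged usage sketch (hypothetical consumer, not from this file): a
 * CNIC-side driver would typically do
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, &my_cnic_ops, my_data))
 *		sent = cp->drv_submit_kwqes_16(netdev, kwqes, n);
 *
 * where my_cnic_ops, my_data, kwqes and n are the consumer's own
 * callback table, cookie and work-queue entries.
 */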
9746
9747#endif /* BCM_CNIC */
94a78b79 9748