/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

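/*
 * Example (added comment; the version digits are purely illustrative):
 * with a hypothetical 6.0.34.0 firmware, FW_FILE_NAME_E1 expands to
 * "bnx2x/bnx2x-e1-6.0.34.0.fw", the name request_firmware() will look
 * up under the firmware search path.
 */
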
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

164
165 static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166                                        u32 addr, dma_addr_t mapping)
167 {
168         REG_WR(bp,  addr, U64_LO(mapping));
169         REG_WR(bp,  addr + 4, U64_HI(mapping));
170 }
171
172 static inline void __storm_memset_fill(struct bnx2x *bp,
173                                        u32 addr, size_t size, u32 val)
174 {
175         int i;
176         for (i = 0; i < size/4; i++)
177                 REG_WR(bp,  addr + (i * 4), val);
178 }
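
/*
 * Note (added comment): size is in bytes; the loop above writes size/4
 * dwords, so callers pass sizeof() of firmware structures that are a
 * whole number of 32-bit words, as all the users below do.
 */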

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					   dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					   dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					   dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					   dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
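
/*
 * Usage sketch (added comment, illustrative): these helpers tunnel GRC
 * register accesses through the PCI config window, so they work before
 * the memory-mapped REG_RD/REG_WR path is usable, e.g.
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, MISC_REG_CHIP_NUM);
 *	bnx2x_reg_wr_ind(bp, some_grc_addr, val);
 *
 * MISC_REG_CHIP_NUM and some_grc_addr are only example offsets; any
 * GRC address works the same way.
 */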

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
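
/*
 * Layout note (added comment): the DMAE command memory holds one
 * struct dmae_command image per channel, so channel idx lives at
 * DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command), and writing 1
 * to dmae_reg_go_c[idx] kicks that channel off.
 */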

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
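
/*
 * Example (added comment, illustrative): an opcode for a host-memory ->
 * GRC copy whose completion is written back to host memory would be
 * built as
 *
 *	u32 op = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				   true, DMAE_COMP_PCI);
 *
 * which is exactly what bnx2x_prep_dmae_with_comp() below does.
 */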

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}
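
/*
 * Typical flow (added comment, sketch; this is what bnx2x_write_dmae()
 * below does):
 *
 *	struct dmae_command dmae;
 *
 *	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *	dmae.src_addr_lo = U64_LO(dma_addr);
 *	dmae.src_addr_hi = U64_HI(dma_addr);
 *	dmae.dst_addr_lo = dst_addr >> 2;	(GRC address in dwords)
 *	dmae.len = len32;			(length in 32-bit words)
 *	rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
 */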

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
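
/*
 * Worked example (added comment; assume for illustration that
 * DMAE_LEN32_WR_MAX(bp) is 0x400 dwords): a len of 0x900 dwords is
 * issued as 0x400 + 0x400 + 0x100, with the byte offset advancing by
 * dmae_wr_max * 4 per chunk, since len counts 32-bit words while
 * addresses count bytes.
 */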

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
		       bp->def_status_blk->sp_sb.index_values[i],
		       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
			 "pf_id(0x%x)  vnic_id(0x%x)  "
			 "vf_id(0x%x)  vf_valid (0x%x)\n",
	       sp_sb_data.igu_sb_id,
	       sp_sb_data.igu_seg_id,
	       sp_sb_data.p_func.pf_id,
	       sp_sb_data.p_func.vnic_id,
	       sp_sb_data.p_func.vf_id,
	       sp_sb_data.p_func.vf_valid);

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
			       "time_to_expire (0x%x) "
			       "timer_value(0x%x)\n", j,
			       hc_sm_p[j].__flags,
			       hc_sm_p[j].igu_sb_id,
			       hc_sm_p[j].igu_seg_id,
			       hc_sm_p[j].time_to_expire,
			       hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
					 "timeout (0x%x)\n", j,
			       hc_index_p[j].flags,
			       hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
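
/*
 * Summary (added comment) of the HC_CONFIG bits programmed above:
 *   MSI-X: MSI_MSIX_INT_EN + ATTN_BIT_EN; SINGLE_ISR and INT_LINE off
 *   MSI:   SINGLE_ISR + MSI_MSIX_INT_EN + ATTN_BIT_EN; INT_LINE off
 *   INTx:  all four enable bits set
 */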

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable
	 * MSI/MSI-X capability.
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
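
/*
 * Usage sketch (added comment, illustrative): callers pass one of the
 * HW_LOCK_RESOURCE_* ids, e.g.
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_SPIO)) {
 *		... touch the shared resource ...
 *	}
 *
 * The write to hw_lock_control_reg + 4 requests the resource bit and
 * the read back confirms whether this function now owns it.
 */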
1347
1348 #ifdef BCM_CNIC
1349 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1350 #endif
1351
1352 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1353                            union eth_rx_cqe *rr_cqe)
1354 {
1355         struct bnx2x *bp = fp->bp;
1356         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1357         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1358
1359         DP(BNX2X_MSG_SP,
1360            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1361            fp->index, cid, command, bp->state,
1362            rr_cqe->ramrod_cqe.ramrod_type);
1363
1364         switch (command | fp->state) {
1365         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1366                 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1367                 fp->state = BNX2X_FP_STATE_OPEN;
1368                 break;
1369
1370         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1371                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
1372                 fp->state = BNX2X_FP_STATE_HALTED;
1373                 break;
1374
1375         case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1376                 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1377                 fp->state = BNX2X_FP_STATE_TERMINATED;
1378                 break;
1379
1380         default:
1381                 BNX2X_ERR("unexpected MC reply (%d)  "
1382                           "fp[%d] state is %x\n",
1383                           command, fp->index, fp->state);
1384                 break;
1385         }
1386
1387         smp_mb__before_atomic_inc();
1388         atomic_inc(&bp->spq_left);
1389         /* push the change in fp->state towards the memory */
1390         smp_wmb();
1391
1392         return;
1393 }
1394
1395 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1396 {
1397         struct bnx2x *bp = netdev_priv(dev_instance);
1398         u16 status = bnx2x_ack_int(bp);
1399         u16 mask;
1400         int i;
1401
1402         /* Return here if interrupt is shared and it's not for us */
1403         if (unlikely(status == 0)) {
1404                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1405                 return IRQ_NONE;
1406         }
1407         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1408
1409         /* Return here if interrupt is disabled */
1410         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1411                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1412                 return IRQ_HANDLED;
1413         }
1414
1415 #ifdef BNX2X_STOP_ON_ERROR
1416         if (unlikely(bp->panic))
1417                 return IRQ_HANDLED;
1418 #endif
1419
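        /*
         * Status bit layout, as decoded below: bit 0 belongs to the default/
         * slow path status block (handled via sp_task), and each fastpath SB
         * owns bit (fp->index + CNIC_CONTEXT_USE + 1), i.e. the mask
         * 0x2 << (fp->index + CNIC_CONTEXT_USE).
         */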
1420         for_each_queue(bp, i) {
1421                 struct bnx2x_fastpath *fp = &bp->fp[i];
1422
1423                 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
1424                 if (status & mask) {
1425                         /* Handle Rx and Tx according to SB id */
1426                         prefetch(fp->rx_cons_sb);
1427                         prefetch(fp->tx_cons_sb);
1428                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1429                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1430                         status &= ~mask;
1431                 }
1432         }
1433
1434 #ifdef BCM_CNIC
1435         mask = 0x2;
1436         if (status & (mask | 0x1)) {
1437                 struct cnic_ops *c_ops = NULL;
1438
1439                 rcu_read_lock();
1440                 c_ops = rcu_dereference(bp->cnic_ops);
1441                 if (c_ops)
1442                         c_ops->cnic_handler(bp->cnic_data, NULL);
1443                 rcu_read_unlock();
1444
1445                 status &= ~mask;
1446         }
1447 #endif
1448
1449         if (unlikely(status & 0x1)) {
1450                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1451
1452                 status &= ~0x1;
1453                 if (!status)
1454                         return IRQ_HANDLED;
1455         }
1456
1457         if (unlikely(status))
1458                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1459                    status);
1460
1461         return IRQ_HANDLED;
1462 }
1463
1464 /* end of fast path */
1465
1466
1467 /* Link */
1468
1469 /*
1470  * General service functions
1471  */
1472
1473 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1474 {
1475         u32 lock_status;
1476         u32 resource_bit = (1 << resource);
1477         int func = BP_FUNC(bp);
1478         u32 hw_lock_control_reg;
1479         int cnt;
1480
1481         /* Validating that the resource is within range */
1482         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1483                 DP(NETIF_MSG_HW,
1484                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1485                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1486                 return -EINVAL;
1487         }
1488
1489         if (func <= 5) {
1490                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1491         } else {
1492                 hw_lock_control_reg =
1493                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1494         }
1495
1496         /* Validating that the resource is not already taken */
1497         lock_status = REG_RD(bp, hw_lock_control_reg);
1498         if (lock_status & resource_bit) {
1499                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1500                    lock_status, resource_bit);
1501                 return -EEXIST;
1502         }
1503
1504         /* Try for 5 seconds, polling every 5 ms (1000 iterations) */
1505         for (cnt = 0; cnt < 1000; cnt++) {
1506                 /* Try to acquire the lock */
1507                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1508                 lock_status = REG_RD(bp, hw_lock_control_reg);
1509                 if (lock_status & resource_bit)
1510                         return 0;
1511
1512                 msleep(5);
1513         }
1514         DP(NETIF_MSG_HW, "Timeout\n");
1515         return -EAGAIN;
1516 }
1517
1518 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1519 {
1520         u32 lock_status;
1521         u32 resource_bit = (1 << resource);
1522         int func = BP_FUNC(bp);
1523         u32 hw_lock_control_reg;
1524
1525         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1526
1527         /* Validating that the resource is within range */
1528         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1529                 DP(NETIF_MSG_HW,
1530                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1531                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1532                 return -EINVAL;
1533         }
1534
1535         if (func <= 5) {
1536                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1537         } else {
1538                 hw_lock_control_reg =
1539                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1540         }
1541
1542         /* Validating that the resource is currently taken */
1543         lock_status = REG_RD(bp, hw_lock_control_reg);
1544         if (!(lock_status & resource_bit)) {
1545                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1546                    lock_status, resource_bit);
1547                 return -EFAULT;
1548         }
1549
1550         REG_WR(bp, hw_lock_control_reg, resource_bit);
1551         return 0;
1552 }
1553
1554
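/*
 * Read the current value of a GPIO pin. The port is swapped when both the
 * NIG port-swap and strap-override registers are set; pins of the swapped
 * port sit MISC_REGISTERS_GPIO_PORT_SHIFT bits higher in MISC_REG_GPIO.
 */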
1555 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1556 {
1557         /* The GPIO should be swapped if swap register is set and active */
1558         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1559                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1560         int gpio_shift = gpio_num +
1561                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1562         u32 gpio_mask = (1 << gpio_shift);
1563         u32 gpio_reg;
1564         int value;
1565
1566         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1567                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1568                 return -EINVAL;
1569         }
1570
1571         /* read GPIO value */
1572         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1573
1574         /* get the requested pin value */
1575         if ((gpio_reg & gpio_mask) == gpio_mask)
1576                 value = 1;
1577         else
1578                 value = 0;
1579
1580         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1581
1582         return value;
1583 }
1584
1585 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1586 {
1587         /* The GPIO should be swapped if swap register is set and active */
1588         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1589                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1590         int gpio_shift = gpio_num +
1591                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1592         u32 gpio_mask = (1 << gpio_shift);
1593         u32 gpio_reg;
1594
1595         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1596                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1597                 return -EINVAL;
1598         }
1599
1600         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1601         /* read GPIO and mask except the float bits */
1602         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1603
1604         switch (mode) {
1605         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1606                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1607                    gpio_num, gpio_shift);
1608                 /* clear FLOAT and set CLR */
1609                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1610                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1611                 break;
1612
1613         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1614                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1615                    gpio_num, gpio_shift);
1616                 /* clear FLOAT and set SET */
1617                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1618                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1619                 break;
1620
1621         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1622                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1623                    gpio_num, gpio_shift);
1624                 /* set FLOAT */
1625                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1626                 break;
1627
1628         default:
1629                 break;
1630         }
1631
1632         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1633         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1634
1635         return 0;
1636 }
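/*
 * Usage sketch (hypothetical values): drive GPIO 1 of port 0 low and later
 * float it again:
 *
 *	bnx2x_set_gpio(bp, 1, MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
 *	...
 *	bnx2x_set_gpio(bp, 1, MISC_REGISTERS_GPIO_INPUT_HI_Z, 0);
 */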
1637
1638 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1639 {
1640         /* The GPIO should be swapped if swap register is set and active */
1641         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1642                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1643         int gpio_shift = gpio_num +
1644                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1645         u32 gpio_mask = (1 << gpio_shift);
1646         u32 gpio_reg;
1647
1648         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1649                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1650                 return -EINVAL;
1651         }
1652
1653         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1654         /* read GPIO int */
1655         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1656
1657         switch (mode) {
1658         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1659                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1660                                    "output low\n", gpio_num, gpio_shift);
1661                 /* clear SET and set CLR */
1662                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1663                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1664                 break;
1665
1666         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1667                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1668                                    "output high\n", gpio_num, gpio_shift);
1669                 /* clear CLR and set SET */
1670                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1671                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1672                 break;
1673
1674         default:
1675                 break;
1676         }
1677
1678         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1679         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1680
1681         return 0;
1682 }
1683
1684 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1685 {
1686         u32 spio_mask = (1 << spio_num);
1687         u32 spio_reg;
1688
1689         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1690             (spio_num > MISC_REGISTERS_SPIO_7)) {
1691                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1692                 return -EINVAL;
1693         }
1694
1695         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1696         /* read SPIO and mask except the float bits */
1697         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1698
1699         switch (mode) {
1700         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1701                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1702                 /* clear FLOAT and set CLR */
1703                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1704                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1705                 break;
1706
1707         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1708                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1709                 /* clear FLOAT and set SET */
1710                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1711                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1712                 break;
1713
1714         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1715                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1716                 /* set FLOAT */
1717                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1718                 break;
1719
1720         default:
1721                 break;
1722         }
1723
1724         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1725         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1726
1727         return 0;
1728 }
1729
1730 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1731 {
1732         u32 sel_phy_idx = 0;
1733         if (bp->link_vars.link_up) {
1734                 sel_phy_idx = EXT_PHY1;
1735                 /* In case the link is SERDES, check if EXT_PHY2 is the one */
1736                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1737                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1738                         sel_phy_idx = EXT_PHY2;
1739         } else {
1740
1741                 switch (bnx2x_phy_selection(&bp->link_params)) {
1742                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1743                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1744                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1745                        sel_phy_idx = EXT_PHY1;
1746                        break;
1747                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1748                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1749                        sel_phy_idx = EXT_PHY2;
1750                        break;
1751                 }
1752         }
1753         /*
1754          * The selected active PHY is always after swapping (in case PHY
1755          * swapping is enabled). So when swapping is enabled, we need to
1756          * reverse the configuration.
1757          */
1758
1759         if (bp->link_params.multi_phy_config &
1760             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1761                 if (sel_phy_idx == EXT_PHY1)
1762                         sel_phy_idx = EXT_PHY2;
1763                 else if (sel_phy_idx == EXT_PHY2)
1764                         sel_phy_idx = EXT_PHY1;
1765         }
1766         return LINK_CONFIG_IDX(sel_phy_idx);
1767 }
1768
1769 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1770 {
1771         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
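        /*
         * Map the negotiated IEEE pause bits onto the ethtool advertising
         * mask (as encoded in the cases below): NONE clears both flags,
         * BOTH sets Pause and Asym_Pause, ASYMMETRIC sets Asym_Pause only.
         */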
1772         switch (bp->link_vars.ieee_fc &
1773                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1774         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1775                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1776                                                    ADVERTISED_Pause);
1777                 break;
1778
1779         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1780                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1781                                                   ADVERTISED_Pause);
1782                 break;
1783
1784         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1785                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1786                 break;
1787
1788         default:
1789                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1790                                                    ADVERTISED_Pause);
1791                 break;
1792         }
1793 }
1794
1795 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1796 {
1797         if (!BP_NOMCP(bp)) {
1798                 u8 rc;
1799                 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1800                 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1801                 /* Initialize link parameters structure variables */
1802                 /* It is recommended to turn off RX FC for jumbo frames
1803                    to get better performance */
1804                 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1805                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1806                 else
1807                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1808
1809                 bnx2x_acquire_phy_lock(bp);
1810
1811                 if (load_mode == LOAD_DIAG) {
1812                         bp->link_params.loopback_mode = LOOPBACK_XGXS;
1813                         bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1814                 }
1815
1816                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1817
1818                 bnx2x_release_phy_lock(bp);
1819
1820                 bnx2x_calc_fc_adv(bp);
1821
1822                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1823                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1824                         bnx2x_link_report(bp);
1825                 }
1826                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1827                 return rc;
1828         }
1829         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1830         return -EINVAL;
1831 }
1832
1833 void bnx2x_link_set(struct bnx2x *bp)
1834 {
1835         if (!BP_NOMCP(bp)) {
1836                 bnx2x_acquire_phy_lock(bp);
1837                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1838                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1839                 bnx2x_release_phy_lock(bp);
1840
1841                 bnx2x_calc_fc_adv(bp);
1842         } else
1843                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1844 }
1845
1846 static void bnx2x__link_reset(struct bnx2x *bp)
1847 {
1848         if (!BP_NOMCP(bp)) {
1849                 bnx2x_acquire_phy_lock(bp);
1850                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1851                 bnx2x_release_phy_lock(bp);
1852         } else
1853                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1854 }
1855
1856 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1857 {
1858         u8 rc = 0;
1859
1860         if (!BP_NOMCP(bp)) {
1861                 bnx2x_acquire_phy_lock(bp);
1862                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1863                                      is_serdes);
1864                 bnx2x_release_phy_lock(bp);
1865         } else
1866                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1867
1868         return rc;
1869 }
1870
1871 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1872 {
1873         u32 r_param = bp->link_vars.line_speed / 8;
1874         u32 fair_periodic_timeout_usec;
1875         u32 t_fair;
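        /*
         * line_speed is in Mbps, i.e. bits per usec, so r_param above is in
         * bytes per usec (1250 at 10 Gbps, for example).
         */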
1876
1877         memset(&(bp->cmng.rs_vars), 0,
1878                sizeof(struct rate_shaping_vars_per_port));
1879         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1880
1881         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1882         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1883
1884         /* this is the threshold below which no timer arming will occur;
1885            the 1.25 coefficient makes the threshold a little bigger than
1886            the real time, to compensate for timer inaccuracy */
1887         bp->cmng.rs_vars.rs_threshold =
1888                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1889
1890         /* resolution of fairness timer */
1891         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1892         /* for 10G it is 1000usec. for 1G it is 10000usec. */
1893         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1894
1895         /* this is the threshold below which we won't arm the timer anymore */
1896         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1897
1898         /* we multiply by 1e3/8 to get bytes/msec.
1899            We don't want the credit to exceed
1900            t_fair*FAIR_MEM (the algorithm resolution) */
1901         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1902         /* since each tick is 4 usec */
1903         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1904 }
1905
1906 /* Calculates the sum of vn_min_rates.
1907    It's needed for further normalizing of the min_rates.
1908    Returns:
1909      sum of vn_min_rates.
1910        or
1911      0 - if all the min_rates are 0.
1912      In the latter case the fairness algorithm should be deactivated.
1913      If not all min_rates are zero then those that are zero will be set to 1.
1914  */
1915 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1916 {
1917         int all_zero = 1;
1918         int vn;
1919
1920         bp->vn_weight_sum = 0;
1921         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1922                 u32 vn_cfg = bp->mf_config[vn];
1923                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1924                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1925
1926                 /* Skip hidden vns */
1927                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1928                         continue;
1929
1930                 /* If min rate is zero - set it to 1 */
1931                 if (!vn_min_rate)
1932                         vn_min_rate = DEF_MIN_RATE;
1933                 else
1934                         all_zero = 0;
1935
1936                 bp->vn_weight_sum += vn_min_rate;
1937         }
1938
1939         /* ... only if all min rates are zeros - disable fairness */
1940         if (all_zero) {
1941                 bp->cmng.flags.cmng_enables &=
1942                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1943                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
1944                    " fairness will be disabled\n");
1945         } else
1946                 bp->cmng.flags.cmng_enables |=
1947                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1948 }
1949
1950 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1951 {
1952         struct rate_shaping_vars_per_vn m_rs_vn;
1953         struct fairness_vars_per_vn m_fair_vn;
1954         u32 vn_cfg = bp->mf_config[vn];
1955         int func = 2*vn + BP_PORT(bp);
1956         u16 vn_min_rate, vn_max_rate;
1957         int i;
1958
1959         /* If function is hidden - set min and max to zeroes */
1960         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1961                 vn_min_rate = 0;
1962                 vn_max_rate = 0;
1963
1964         } else {
1965                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1966                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1967                 /* If min rate is zero - set it to 1 */
1968                 if (bp->vn_weight_sum && (vn_min_rate == 0))
1969                         vn_min_rate = DEF_MIN_RATE;
1970                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1971                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1972         }
1973
1974         DP(NETIF_MSG_IFUP,
1975            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1976            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1977
1978         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1979         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1980
1981         /* global vn counter - maximal Mbps for this vn */
1982         m_rs_vn.vn_counter.rate = vn_max_rate;
1983
1984         /* quota - number of bytes transmitted in this period */
1985         m_rs_vn.vn_counter.quota =
1986                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
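        /*
         * vn_max_rate is in Mbps (bits per usec), so the quota is in bytes;
         * e.g. (illustrative numbers) a 10000 Mbps vn with a 100 usec period
         * gets 10000 * 100 / 8 = 125000 bytes per period.
         */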
1987
1988         if (bp->vn_weight_sum) {
1989                 /* credit for each period of the fairness algorithm:
1990                    number of bytes in T_FAIR (the vn share the port rate).
1991                    vn_weight_sum should not be larger than 10000, thus
1992                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1993                    than zero */
1994                 m_fair_vn.vn_credit_delta =
1995                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1996                                                    (8 * bp->vn_weight_sum))),
1997                               (bp->cmng.fair_vars.fair_threshold * 2));
1998                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1999                    m_fair_vn.vn_credit_delta);
2000         }
2001
2002         /* Store it to internal memory */
2003         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2004                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2005                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2006                        ((u32 *)(&m_rs_vn))[i]);
2007
2008         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2009                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2010                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2011                        ((u32 *)(&m_fair_vn))[i]);
2012 }
2013
2014 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2015 {
2016         if (CHIP_REV_IS_SLOW(bp))
2017                 return CMNG_FNS_NONE;
2018         if (IS_MF(bp))
2019                 return CMNG_FNS_MINMAX;
2020
2021         return CMNG_FNS_NONE;
2022 }
2023
2024 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2025 {
2026         int vn;
2027
2028         if (BP_NOMCP(bp))
2029                 return; /* what should the default value be in this case? */
2030
2031         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2032                 int func = 2*vn + BP_PORT(bp); /* absolute func number */
2033                 bp->mf_config[vn] =
2034                         MF_CFG_RD(bp, func_mf_config[func].config);
2035         }
2036 }
2037
2038 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2039 {
2040
2041         if (cmng_type == CMNG_FNS_MINMAX) {
2042                 int vn;
2043
2044                 /* clear cmng_enables */
2045                 bp->cmng.flags.cmng_enables = 0;
2046
2047                 /* read mf conf from shmem */
2048                 if (read_cfg)
2049                         bnx2x_read_mf_cfg(bp);
2050
2051                 /* Init rate shaping and fairness contexts */
2052                 bnx2x_init_port_minmax(bp);
2053
2054                 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2055                 bnx2x_calc_vn_weight_sum(bp);
2056
2057                 /* calculate and set min-max rate for each vn */
2058                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2059                         bnx2x_init_vn_minmax(bp, vn);
2060
2061                 /* always enable rate shaping and fairness */
2062                 bp->cmng.flags.cmng_enables |=
2063                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2064                 if (!bp->vn_weight_sum)
2065                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
2066                                    " fairness will be disabled\n");
2067                 return;
2068         }
2069
2070         /* rate shaping and fairness are disabled */
2071         DP(NETIF_MSG_IFUP,
2072            "rate shaping and fairness are disabled\n");
2073 }
2074
2075 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2076 {
2077         int port = BP_PORT(bp);
2078         int func;
2079         int vn;
2080
2081         /* Set the attention towards other drivers on the same port */
2082         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2083                 if (vn == BP_E1HVN(bp))
2084                         continue;
2085
2086                 func = ((vn << 1) | port);
2087                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2088                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2089         }
2090 }
2091
2092 /* This function is called upon link interrupt */
2093 static void bnx2x_link_attn(struct bnx2x *bp)
2094 {
2095         u32 prev_link_status = bp->link_vars.link_status;
2096         /* Make sure that we are synced with the current statistics */
2097         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2098
2099         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2100
2101         if (bp->link_vars.link_up) {
2102
2103                 /* dropless flow control */
2104                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2105                         int port = BP_PORT(bp);
2106                         u32 pause_enabled = 0;
2107
2108                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2109                                 pause_enabled = 1;
2110
2111                         REG_WR(bp, BAR_USTRORM_INTMEM +
2112                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2113                                pause_enabled);
2114                 }
2115
2116                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2117                         struct host_port_stats *pstats;
2118
2119                         pstats = bnx2x_sp(bp, port_stats);
2120                         /* reset old bmac stats */
2121                         memset(&(pstats->mac_stx[0]), 0,
2122                                sizeof(struct mac_stx));
2123                 }
2124                 if (bp->state == BNX2X_STATE_OPEN)
2125                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2126         }
2127
2128         /* indicate link status only if link status actually changed */
2129         if (prev_link_status != bp->link_vars.link_status)
2130                 bnx2x_link_report(bp);
2131
2132         if (IS_MF(bp))
2133                 bnx2x_link_sync_notify(bp);
2134
2135         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2136                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2137
2138                 if (cmng_fns != CMNG_FNS_NONE) {
2139                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2140                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2141                 } else
2142                         /* rate shaping and fairness are disabled */
2143                         DP(NETIF_MSG_IFUP,
2144                            "single function mode without fairness\n");
2145         }
2146 }
2147
2148 void bnx2x__link_status_update(struct bnx2x *bp)
2149 {
2150         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2151                 return;
2152
2153         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2154
2155         if (bp->link_vars.link_up)
2156                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2157         else
2158                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2159
2160         /* the link status update could be the result of a DCC event,
2161            hence re-read the shmem mf configuration */
2162         bnx2x_read_mf_cfg(bp);
2163
2164         /* indicate link status */
2165         bnx2x_link_report(bp);
2166 }
2167
2168 static void bnx2x_pmf_update(struct bnx2x *bp)
2169 {
2170         int port = BP_PORT(bp);
2171         u32 val;
2172
2173         bp->port.pmf = 1;
2174         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2175
2176         /* enable nig attention */
2177         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2178         if (bp->common.int_block == INT_BLOCK_HC) {
2179                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2180                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2181         } else if (CHIP_IS_E2(bp)) {
2182                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2183                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2184         }
2185
2186         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2187 }
2188
2189 /* end of Link */
2190
2191 /* slow path */
2192
2193 /*
2194  * General service functions
2195  */
2196
2197 /* send the MCP a request, block until there is a reply */
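/*
 * The handshake with the MCP firmware, as implemented below: the driver
 * writes the parameter and then the command fused with a rolling sequence
 * number into the function's mailbox, and polls the firmware mailbox header
 * until its FW_MSG_SEQ_NUMBER_MASK bits echo that sequence; the reply code
 * is carried in the FW_MSG_CODE_MASK bits.
 */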
2198 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2199 {
2200         int mb_idx = BP_FW_MB_IDX(bp);
2201         u32 seq = ++bp->fw_seq;
2202         u32 rc = 0;
2203         u32 cnt = 1;
2204         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2205
2206         mutex_lock(&bp->fw_mb_mutex);
2207         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2208         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2209
2210         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2211
2212         do {
2213                 /* let the FW do its magic ... */
2214                 msleep(delay);
2215
2216                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2217
2218                 /* Give the FW up to 5 seconds (500*10ms) */
2219         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2220
2221         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2222            cnt*delay, rc, seq);
2223
2224         /* is this a reply to our command? */
2225         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2226                 rc &= FW_MSG_CODE_MASK;
2227         else {
2228                 /* FW BUG! */
2229                 BNX2X_ERR("FW failed to respond!\n");
2230                 bnx2x_fw_dump(bp);
2231                 rc = 0;
2232         }
2233         mutex_unlock(&bp->fw_mb_mutex);
2234
2235         return rc;
2236 }
2237
2238 /* must be called under rtnl_lock */
2239 void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2240 {
2241         u32 mask = (1 << cl_id);
2242
2243         /* initial setting is BNX2X_ACCEPT_NONE */
2244         u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2245         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2246         u8 unmatched_unicast = 0;
2247
2248         if (filters & BNX2X_PROMISCUOUS_MODE) {
2249                 /* promiscuous - accept all, drop none */
2250                 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2251                 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2252         }
2253         if (filters & BNX2X_ACCEPT_UNICAST) {
2254                 /* accept matched ucast */
2255                 drop_all_ucast = 0;
2256         }
2257         if (filters & BNX2X_ACCEPT_MULTICAST) {
2258                 /* accept matched mcast */
2259                 drop_all_mcast = 0;
2260         }
2261         if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2262                 /* accept all ucast */
2263                 drop_all_ucast = 0;
2264                 accp_all_ucast = 1;
2265         }
2266         if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2267                 /* accept all mcast */
2268                 drop_all_mcast = 0;
2269                 accp_all_mcast = 1;
2270         }
2271         if (filters & BNX2X_ACCEPT_BROADCAST) {
2272                 /* accept (all) bcast */
2273                 drop_all_bcast = 0;
2274                 accp_all_bcast = 1;
2275         }
2276
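        /*
         * Each mac_filters field is a per-client bitmask; the ternaries below
         * set or clear this client's bit (mask == 1 << cl_id) in each filter
         * according to the flags computed above.
         */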
2277         bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2278                 bp->mac_filters.ucast_drop_all | mask :
2279                 bp->mac_filters.ucast_drop_all & ~mask;
2280
2281         bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2282                 bp->mac_filters.mcast_drop_all | mask :
2283                 bp->mac_filters.mcast_drop_all & ~mask;
2284
2285         bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2286                 bp->mac_filters.bcast_drop_all | mask :
2287                 bp->mac_filters.bcast_drop_all & ~mask;
2288
2289         bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2290                 bp->mac_filters.ucast_accept_all | mask :
2291                 bp->mac_filters.ucast_accept_all & ~mask;
2292
2293         bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2294                 bp->mac_filters.mcast_accept_all | mask :
2295                 bp->mac_filters.mcast_accept_all & ~mask;
2296
2297         bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2298                 bp->mac_filters.bcast_accept_all | mask :
2299                 bp->mac_filters.bcast_accept_all & ~mask;
2300
2301         bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2302                 bp->mac_filters.unmatched_unicast | mask :
2303                 bp->mac_filters.unmatched_unicast & ~mask;
2304 }
2305
2306 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2307 {
2308         struct tstorm_eth_function_common_config tcfg = {0};
2309         u16 rss_flgs;
2310
2311         /* tpa */
2312         if (p->func_flgs & FUNC_FLG_TPA)
2313                 tcfg.config_flags |=
2314                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2315
2316         /* set rss flags */
2317         rss_flgs = (p->rss->mode <<
2318                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2319
2320         if (p->rss->cap & RSS_IPV4_CAP)
2321                 rss_flgs |= RSS_IPV4_CAP_MASK;
2322         if (p->rss->cap & RSS_IPV4_TCP_CAP)
2323                 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2324         if (p->rss->cap & RSS_IPV6_CAP)
2325                 rss_flgs |= RSS_IPV6_CAP_MASK;
2326         if (p->rss->cap & RSS_IPV6_TCP_CAP)
2327                 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2328
2329         tcfg.config_flags |= rss_flgs;
2330         tcfg.rss_result_mask = p->rss->result_mask;
2331
2332         storm_memset_func_cfg(bp, &tcfg, p->func_id);
2333
2334         /* Enable the function in the FW */
2335         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2336         storm_memset_func_en(bp, p->func_id, 1);
2337
2338         /* statistics */
2339         if (p->func_flgs & FUNC_FLG_STATS) {
2340                 struct stats_indication_flags stats_flags = {0};
2341                 stats_flags.collect_eth = 1;
2342
2343                 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2344                 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2345
2346                 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2347                 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2348
2349                 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2350                 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2351
2352                 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2353                 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2354         }
2355
2356         /* spq */
2357         if (p->func_flgs & FUNC_FLG_SPQ) {
2358                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2359                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2360                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2361         }
2362 }
2363
2364 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365                                      struct bnx2x_fastpath *fp)
2366 {
2367         u16 flags = 0;
2368
2369         /* calculate queue flags */
2370         flags |= QUEUE_FLG_CACHE_ALIGN;
2371         flags |= QUEUE_FLG_HC;
2372         flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2373
2374         flags |= QUEUE_FLG_VLAN;
2375         DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2376
2377         if (!fp->disable_tpa)
2378                 flags |= QUEUE_FLG_TPA;
2379
2380         flags |= QUEUE_FLG_STATS;
2381
2382         return flags;
2383 }
2384
2385 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2386         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2387         struct bnx2x_rxq_init_params *rxq_init)
2388 {
2389         u16 max_sge = 0;
2390         u16 sge_sz = 0;
2391         u16 tpa_agg_size = 0;
2392
2393         /* calculate queue flags */
2394         u16 flags = bnx2x_get_cl_flags(bp, fp);
2395
2396         if (!fp->disable_tpa) {
2397                 pause->sge_th_hi = 250;
2398                 pause->sge_th_lo = 150;
2399                 tpa_agg_size = min_t(u32,
2400                         (min_t(u32, 8, MAX_SKB_FRAGS) *
2401                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2402                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2403                         SGE_PAGE_SHIFT;
2404                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2405                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2406                 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2407                                     0xffff);
2408         }
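        /*
         * Worked example (assuming, illustratively, 4K SGE pages and
         * PAGES_PER_SGE == 2): for mtu 9000, SGE_PAGE_ALIGN(9000) = 12288,
         * i.e. 3 pages, rounded up to 4 pages = 2 SGEs per packet.
         */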
2409
2410         /* pause - not for e1 */
2411         if (!CHIP_IS_E1(bp)) {
2412                 pause->bd_th_hi = 350;
2413                 pause->bd_th_lo = 250;
2414                 pause->rcq_th_hi = 350;
2415                 pause->rcq_th_lo = 250;
2416                 pause->sge_th_hi = 0;
2417                 pause->sge_th_lo = 0;
2418                 pause->pri_map = 1;
2419         }
2420
2421         /* rxq setup */
2422         rxq_init->flags = flags;
2423         rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2424         rxq_init->dscr_map = fp->rx_desc_mapping;
2425         rxq_init->sge_map = fp->rx_sge_mapping;
2426         rxq_init->rcq_map = fp->rx_comp_mapping;
2427         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2428         rxq_init->mtu = bp->dev->mtu;
2429         rxq_init->buf_sz = bp->rx_buf_size;
2430         rxq_init->cl_qzone_id = fp->cl_qzone_id;
2431         rxq_init->cl_id = fp->cl_id;
2432         rxq_init->spcl_id = fp->cl_id;
2433         rxq_init->stat_id = fp->cl_id;
2434         rxq_init->tpa_agg_sz = tpa_agg_size;
2435         rxq_init->sge_buf_sz = sge_sz;
2436         rxq_init->max_sges_pkt = max_sge;
2437         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2438         rxq_init->fw_sb_id = fp->fw_sb_id;
2439
2440         rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2441
2442         rxq_init->cid = HW_CID(bp, fp->cid);
2443
2444         rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2445 }
2446
2447 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2448         struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2449 {
2450         u16 flags = bnx2x_get_cl_flags(bp, fp);
2451
2452         txq_init->flags = flags;
2453         txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2454         txq_init->dscr_map = fp->tx_desc_mapping;
2455         txq_init->stat_id = fp->cl_id;
2456         txq_init->cid = HW_CID(bp, fp->cid);
2457         txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2458         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2459         txq_init->fw_sb_id = fp->fw_sb_id;
2460         txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2461 }
2462
2463 void bnx2x_pf_init(struct bnx2x *bp)
2464 {
2465         struct bnx2x_func_init_params func_init = {0};
2466         struct bnx2x_rss_params rss = {0};
2467         struct event_ring_data eq_data = { {0} };
2468         u16 flags;
2469
2470         /* pf specific setups */
2471         if (!CHIP_IS_E1(bp))
2472                 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2473
2474         if (CHIP_IS_E2(bp)) {
2475                 /* reset IGU PF statistics: MSIX + ATTN */
2476                 /* PF */
2477                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2478                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2479                            (CHIP_MODE_IS_4_PORT(bp) ?
2480                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2481                 /* ATTN */
2482                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2483                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2484                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2485                            (CHIP_MODE_IS_4_PORT(bp) ?
2486                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2487         }
2488
2489         /* function setup flags */
2490         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2491
2492         if (CHIP_IS_E1x(bp))
2493                 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2494         else
2495                 flags |= FUNC_FLG_TPA;
2496
2497         /* function setup */
2498
2499         /*
2500          * Although RSS is meaningless when there is a single HW queue, we
2501          * still need it enabled in order to have the HW Rx hash generated.
2502          */
2503         rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2504                    RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2505         rss.mode = bp->multi_mode;
2506         rss.result_mask = MULTI_MASK;
2507         func_init.rss = &rss;
2508
2509         func_init.func_flgs = flags;
2510         func_init.pf_id = BP_FUNC(bp);
2511         func_init.func_id = BP_FUNC(bp);
2512         func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2513         func_init.spq_map = bp->spq_mapping;
2514         func_init.spq_prod = bp->spq_prod_idx;
2515
2516         bnx2x_func_init(bp, &func_init);
2517
2518         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2519
2520         /*
2521          * Congestion management values depend on the link rate.
2522          * There is no active link, so the initial link rate is set to 10 Gbps.
2523          * When the link comes up, the congestion management values are
2524          * re-calculated according to the actual link rate.
2525          */
2526         bp->link_vars.line_speed = SPEED_10000;
2527         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2528
2529         /* Only the PMF sets the HW */
2530         if (bp->port.pmf)
2531                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2532
2533         /* no rx until link is up */
2534         bp->rx_mode = BNX2X_RX_MODE_NONE;
2535         bnx2x_set_storm_rx_mode(bp);
2536
2537         /* init Event Queue */
2538         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2539         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2540         eq_data.producer = bp->eq_prod;
2541         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2542         eq_data.sb_id = DEF_SB_ID;
2543         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2544 }
2545
2546
2547 static void bnx2x_e1h_disable(struct bnx2x *bp)
2548 {
2549         int port = BP_PORT(bp);
2550
2551         netif_tx_disable(bp->dev);
2552
2553         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2554
2555         netif_carrier_off(bp->dev);
2556 }
2557
2558 static void bnx2x_e1h_enable(struct bnx2x *bp)
2559 {
2560         int port = BP_PORT(bp);
2561
2562         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2563
2564         /* Tx queues should only be re-enabled */
2565         netif_tx_wake_all_queues(bp->dev);
2566
2567         /*
2568          * Should not call netif_carrier_on since it will be called if the link
2569          * is up when checking for link state
2570          */
2571 }
2572
2573 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2574 {
2575         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2576
2577         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2578
2579                 /*
2580                  * This is the only place besides the function initialization
2581                  * where the bp->flags can change so it is done without any
2582                  * locks
2583                  */
2584                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2585                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2586                         bp->flags |= MF_FUNC_DIS;
2587
2588                         bnx2x_e1h_disable(bp);
2589                 } else {
2590                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2591                         bp->flags &= ~MF_FUNC_DIS;
2592
2593                         bnx2x_e1h_enable(bp);
2594                 }
2595                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2596         }
2597         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2598
2599                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2600                 bnx2x_link_sync_notify(bp);
2601                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2602                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2603         }
2604
2605         /* Report results to MCP */
2606         if (dcc_event)
2607                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2608         else
2609                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2610 }
2611
2612 /* must be called under the spq lock */
2613 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2614 {
2615         struct eth_spe *next_spe = bp->spq_prod_bd;
2616
2617         if (bp->spq_prod_bd == bp->spq_last_bd) {
2618                 bp->spq_prod_bd = bp->spq;
2619                 bp->spq_prod_idx = 0;
2620                 DP(NETIF_MSG_TIMER, "end of spq\n");
2621         } else {
2622                 bp->spq_prod_bd++;
2623                 bp->spq_prod_idx++;
2624         }
2625         return next_spe;
2626 }
2627
2628 /* must be called under the spq lock */
2629 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2630 {
2631         int func = BP_FUNC(bp);
2632
2633         /* Make sure that BD data is updated before writing the producer */
2634         wmb();
2635
2636         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2637                  bp->spq_prod_idx);
2638         mmiowb();
2639 }
2640
2641 /* the slow path queue is odd since completions arrive on the fastpath ring */
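/*
 * Posting sequence, as implemented below: take the spq lock, make sure a
 * slot is left (bp->spq_left), fill the next SPE with the HW CID, connection
 * type and data address, then bump the producer index through
 * bnx2x_sp_prod_update() and drop the lock.
 */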
2642 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2643                   u32 data_hi, u32 data_lo, int common)
2644 {
2645         struct eth_spe *spe;
2646         u16 type;
2647
2648 #ifdef BNX2X_STOP_ON_ERROR
2649         if (unlikely(bp->panic))
2650                 return -EIO;
2651 #endif
2652
2653         spin_lock_bh(&bp->spq_lock);
2654
2655         if (!atomic_read(&bp->spq_left)) {
2656                 BNX2X_ERR("BUG! SPQ ring full!\n");
2657                 spin_unlock_bh(&bp->spq_lock);
2658                 bnx2x_panic();
2659                 return -EBUSY;
2660         }
2661
2662         spe = bnx2x_sp_get_next(bp);
2663
2664         /* CID needs the port number to be encoded in it */
2665         spe->hdr.conn_and_cmd_data =
2666                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2667                                     HW_CID(bp, cid));
2668
2669         if (common)
2670                 /* Common ramrods:
2671                  *      FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2672                  *      TRAFFIC_STOP, TRAFFIC_START
2673                  */
2674                 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2675                         & SPE_HDR_CONN_TYPE;
2676         else
2677                 /* ETH ramrods: SETUP, HALT */
2678                 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2679                         & SPE_HDR_CONN_TYPE;
2680
2681         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2682                  SPE_HDR_FUNCTION_ID);
2683
2684         spe->hdr.type = cpu_to_le16(type);
2685
2686         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2687         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2688
2689         /* the stats ramrod has its own slot on the spq */
2690         if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2691                 /* It's ok if the actual decrement is issued towards the memory
2692                  * somewhere between the spin_lock and spin_unlock. Thus no
2693                  * further explicit memory barrier is needed.
2694                  */
2695                 atomic_dec(&bp->spq_left);
2696
2697         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2698            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
2699            "type(0x%x) left %x\n",
2700            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2701            (u32)(U64_LO(bp->spq_mapping) +
2702            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2703            HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2704
2705         bnx2x_sp_prod_update(bp);
2706         spin_unlock_bh(&bp->spq_lock);
2707         return 0;
2708 }
2709
2710 /* acquire split MCP access lock register */
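/*
 * Bit 31 of the GRC register below is the lock bit: writing it requests the
 * lock and reading it back set confirms ownership. The loop below retries
 * for up to 1000 * 5 ms = 5 seconds before giving up.
 */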
2711 static int bnx2x_acquire_alr(struct bnx2x *bp)
2712 {
2713         u32 j, val;
2714         int rc = 0;
2715
2716         might_sleep();
2717         for (j = 0; j < 1000; j++) {
2718                 val = (1UL << 31);
2719                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2720                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2721                 if (val & (1L << 31))
2722                         break;
2723
2724                 msleep(5);
2725         }
2726         if (!(val & (1L << 31))) {
2727                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2728                 rc = -EBUSY;
2729         }
2730
2731         return rc;
2732 }
2733
2734 /* release split MCP access lock register */
2735 static void bnx2x_release_alr(struct bnx2x *bp)
2736 {
2737         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2738 }
2739
2740 #define BNX2X_DEF_SB_ATT_IDX    0x0001
2741 #define BNX2X_DEF_SB_IDX        0x0002
2742
2743 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2744 {
2745         struct host_sp_status_block *def_sb = bp->def_status_blk;
2746         u16 rc = 0;
2747
2748         barrier(); /* status block is written to by the chip */
2749         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2750                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2751                 rc |= BNX2X_DEF_SB_ATT_IDX;
2752         }
2753
2754         if (bp->def_idx != def_sb->sp_sb.running_index) {
2755                 bp->def_idx = def_sb->sp_sb.running_index;
2756                 rc |= BNX2X_DEF_SB_IDX;
2757         }
2758
2759         /* Do not reorder: the index reads should complete before handling */
2760         barrier();
2761         return rc;
2762 }
2763
2764 /*
2765  * slow path service functions
2766  */
2767
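/*
 * Flow for a newly asserted attention, as handled below: mask the asserted
 * bits in the AEU (under the port attention HW lock), record them in
 * bp->attn_state, service the hard-wired sources (NIG/link, timers, GPIOs,
 * general attentions), ack the bits towards the HC or IGU, and finally
 * restore the NIG interrupt mask.
 */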
2768 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2769 {
2770         int port = BP_PORT(bp);
2771         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2772                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2773         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2774                                        NIG_REG_MASK_INTERRUPT_PORT0;
2775         u32 aeu_mask;
2776         u32 nig_mask = 0;
2777         u32 reg_addr;
2778
2779         if (bp->attn_state & asserted)
2780                 BNX2X_ERR("IGU ERROR\n");
2781
2782         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2783         aeu_mask = REG_RD(bp, aeu_addr);
2784
2785         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2786            aeu_mask, asserted);
2787         aeu_mask &= ~(asserted & 0x3ff);
2788         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2789
2790         REG_WR(bp, aeu_addr, aeu_mask);
2791         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2792
2793         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2794         bp->attn_state |= asserted;
2795         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2796
2797         if (asserted & ATTN_HARD_WIRED_MASK) {
2798                 if (asserted & ATTN_NIG_FOR_FUNC) {
2799
2800                         bnx2x_acquire_phy_lock(bp);
2801
2802                         /* save nig interrupt mask */
2803                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2804                         REG_WR(bp, nig_int_mask_addr, 0);
2805
2806                         bnx2x_link_attn(bp);
2807
2808                         /* handle unicore attn? */
2809                 }
2810                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2811                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2812
2813                 if (asserted & GPIO_2_FUNC)
2814                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2815
2816                 if (asserted & GPIO_3_FUNC)
2817                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2818
2819                 if (asserted & GPIO_4_FUNC)
2820                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2821
2822                 if (port == 0) {
2823                         if (asserted & ATTN_GENERAL_ATTN_1) {
2824                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2825                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2826                         }
2827                         if (asserted & ATTN_GENERAL_ATTN_2) {
2828                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2829                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2830                         }
2831                         if (asserted & ATTN_GENERAL_ATTN_3) {
2832                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2833                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2834                         }
2835                 } else {
2836                         if (asserted & ATTN_GENERAL_ATTN_4) {
2837                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2838                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2839                         }
2840                         if (asserted & ATTN_GENERAL_ATTN_5) {
2841                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2842                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2843                         }
2844                         if (asserted & ATTN_GENERAL_ATTN_6) {
2845                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2846                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2847                         }
2848                 }
2849
2850         } /* if hardwired */
2851
2852         if (bp->common.int_block == INT_BLOCK_HC)
2853                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2854                             COMMAND_REG_ATTN_BITS_SET);
2855         else
2856                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2857
2858         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2859            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2860         REG_WR(bp, reg_addr, asserted);
2861
2862         /* now set back the mask */
2863         if (asserted & ATTN_NIG_FOR_FUNC) {
2864                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2865                 bnx2x_release_phy_lock(bp);
2866         }
2867 }
2868
2869 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2870 {
2871         int port = BP_PORT(bp);
2872         u32 ext_phy_config;
2873         /* mark the failure */
2874         ext_phy_config =
2875                 SHMEM_RD(bp,
2876                          dev_info.port_hw_config[port].external_phy_config);
2877
2878         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2879         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2880         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2881                  ext_phy_config);
2882
2883         /* log the failure */
2884         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2885                " the driver to shut down the card to prevent permanent"
2886                " damage. Please contact OEM Support for assistance\n");
2887 }
2888
2889 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2890 {
2891         int port = BP_PORT(bp);
2892         int reg_offset;
2893         u32 val;
2894
2895         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2896                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2897
2898         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2899
2900                 val = REG_RD(bp, reg_offset);
2901                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2902                 REG_WR(bp, reg_offset, val);
2903
2904                 BNX2X_ERR("SPIO5 hw attention\n");
2905
2906                 /* Fan failure attention */
2907                 bnx2x_hw_reset_phy(&bp->link_params);
2908                 bnx2x_fan_failure(bp);
2909         }
2910
2911         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2912                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2913                 bnx2x_acquire_phy_lock(bp);
2914                 bnx2x_handle_module_detect_int(&bp->link_params);
2915                 bnx2x_release_phy_lock(bp);
2916         }
2917
2918         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2919
2920                 val = REG_RD(bp, reg_offset);
2921                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2922                 REG_WR(bp, reg_offset, val);
2923
2924                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2925                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2926                 bnx2x_panic();
2927         }
2928 }
2929
2930 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2931 {
2932         u32 val;
2933
2934         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2935
2936                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2937                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2938                 /* DORQ discard attention */
2939                 if (val & 0x2)
2940                         BNX2X_ERR("FATAL error from DORQ\n");
2941         }
2942
2943         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2944
2945                 int port = BP_PORT(bp);
2946                 int reg_offset;
2947
2948                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2949                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2950
2951                 val = REG_RD(bp, reg_offset);
2952                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2953                 REG_WR(bp, reg_offset, val);
2954
2955                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2956                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2957                 bnx2x_panic();
2958         }
2959 }
2960
2961 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2962 {
2963         u32 val;
2964
2965         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2966
2967                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2968                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2969                 /* CFC error attention */
2970                 if (val & 0x2)
2971                         BNX2X_ERR("FATAL error from CFC\n");
2972         }
2973
2974         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2975
2976                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2977                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2978                 /* RQ_USDMDP_FIFO_OVERFLOW */
2979                 if (val & 0x18000)
2980                         BNX2X_ERR("FATAL error from PXP\n");
2981                 if (CHIP_IS_E2(bp)) {
2982                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2983                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2984                 }
2985         }
2986
2987         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2988
2989                 int port = BP_PORT(bp);
2990                 int reg_offset;
2991
2992                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2993                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2994
2995                 val = REG_RD(bp, reg_offset);
2996                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2997                 REG_WR(bp, reg_offset, val);
2998
2999                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3000                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3001                 bnx2x_panic();
3002         }
3003 }
3004
3005 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3006 {
3007         u32 val;
3008
3009         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3010
3011                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3012                         int func = BP_FUNC(bp);
3013
3014                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3015                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3016                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3017                         val = SHMEM_RD(bp,
3018                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3019                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3020                                 bnx2x_dcc_event(bp,
3021                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3022                         bnx2x__link_status_update(bp);
3023                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3024                                 bnx2x_pmf_update(bp);
3025
3026                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3027
3028                         BNX2X_ERR("MC assert!\n");
3029                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3030                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3031                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3032                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3033                         bnx2x_panic();
3034
3035                 } else if (attn & BNX2X_MCP_ASSERT) {
3036
3037                         BNX2X_ERR("MCP assert!\n");
3038                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3039                         bnx2x_fw_dump(bp);
3040
3041                 } else
3042                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3043         }
3044
3045         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3046                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3047                 if (attn & BNX2X_GRC_TIMEOUT) {
3048                         val = CHIP_IS_E1(bp) ? 0 :
3049                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3050                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3051                 }
3052                 if (attn & BNX2X_GRC_RSV) {
3053                         val = CHIP_IS_E1(bp) ? 0 :
3054                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3055                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3056                 }
3057                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3058         }
3059 }
3060
3061 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3062 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3063 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3064 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3065 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3066 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
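
/*
 * Editor's illustration (derived from the macros above): the register is
 * split as
 *
 *   bits 31..16  RESET_DONE_FLAG_MASK (bit 16 set => reset in progress)
 *   bits 15..0   LOAD_COUNTER_MASK    (how many functions are loaded)
 *
 * e.g. a value of 0x00010003 means a reset is in progress while three
 * functions hold a load reference.
 */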
3067
3068 /*
3069  * should be run under rtnl lock
3070  */
3071 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3072 {
3073         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3074         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3075         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3076         barrier();
3077         mmiowb();
3078 }
3079
3080 /*
3081  * should be run under rtnl lock
3082  */
3083 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3084 {
3085         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3086         val |= (1 << RESET_DONE_FLAG_SHIFT);
3087         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3088         barrier();
3089         mmiowb();
3090 }
3091
3092 /*
3093  * should be run under rtnl lock
3094  */
3095 bool bnx2x_reset_is_done(struct bnx2x *bp)
3096 {
3097         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3098         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3099         return (val & RESET_DONE_FLAG_MASK) ? false : true;
3100 }
3101
3102 /*
3103  * should be run under rtnl lock
3104  */
3105 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3106 {
3107         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3108
3109         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3110
3111         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3112         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3113         barrier();
3114         mmiowb();
3115 }
3116
3117 /*
3118  * should be run under rtnl lock
3119  */
3120 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3121 {
3122         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3123
3124         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3125
3126         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3127         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3128         barrier();
3129         mmiowb();
3130
3131         return val1;
3132 }
3133
3134 /*
3135  * should be run under rtnl lock
3136  */
3137 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3138 {
3139         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3140 }
3141
3142 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3143 {
3144         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3145         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3146 }
3147
3148 static inline void _print_next_block(int idx, const char *blk)
3149 {
3150         if (idx)
3151                 pr_cont(", ");
3152         pr_cont("%s", blk);
3153 }
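
/*
 * Editor's example: with par_num starting at 0, three parity hits print
 * as one continued line, e.g. "BRB, PARSER, TSDM".
 */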
3154
3155 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3156 {
3157         int i = 0;
3158         u32 cur_bit = 0;
3159         for (i = 0; sig; i++) {
3160                 cur_bit = ((u32)0x1 << i);
3161                 if (sig & cur_bit) {
3162                         switch (cur_bit) {
3163                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3164                                 _print_next_block(par_num++, "BRB");
3165                                 break;
3166                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3167                                 _print_next_block(par_num++, "PARSER");
3168                                 break;
3169                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3170                                 _print_next_block(par_num++, "TSDM");
3171                                 break;
3172                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3173                                 _print_next_block(par_num++, "SEARCHER");
3174                                 break;
3175                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3176                                 _print_next_block(par_num++, "TSEMI");
3177                                 break;
3178                         }
3179
3180                         /* Clear the bit */
3181                         sig &= ~cur_bit;
3182                 }
3183         }
3184
3185         return par_num;
3186 }
3187
3188 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3189 {
3190         int i = 0;
3191         u32 cur_bit = 0;
3192         for (i = 0; sig; i++) {
3193                 cur_bit = ((u32)0x1 << i);
3194                 if (sig & cur_bit) {
3195                         switch (cur_bit) {
3196                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3197                                 _print_next_block(par_num++, "PBCLIENT");
3198                                 break;
3199                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3200                                 _print_next_block(par_num++, "QM");
3201                                 break;
3202                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3203                                 _print_next_block(par_num++, "XSDM");
3204                                 break;
3205                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3206                                 _print_next_block(par_num++, "XSEMI");
3207                                 break;
3208                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3209                                 _print_next_block(par_num++, "DOORBELLQ");
3210                                 break;
3211                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3212                                 _print_next_block(par_num++, "VAUX PCI CORE");
3213                                 break;
3214                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3215                                 _print_next_block(par_num++, "DEBUG");
3216                                 break;
3217                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3218                                 _print_next_block(par_num++, "USDM");
3219                                 break;
3220                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3221                                 _print_next_block(par_num++, "USEMI");
3222                                 break;
3223                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3224                                 _print_next_block(par_num++, "UPB");
3225                                 break;
3226                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3227                                 _print_next_block(par_num++, "CSDM");
3228                                 break;
3229                         }
3230
3231                         /* Clear the bit */
3232                         sig &= ~cur_bit;
3233                 }
3234         }
3235
3236         return par_num;
3237 }
3238
3239 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3240 {
3241         int i = 0;
3242         u32 cur_bit = 0;
3243         for (i = 0; sig; i++) {
3244                 cur_bit = ((u32)0x1 << i);
3245                 if (sig & cur_bit) {
3246                         switch (cur_bit) {
3247                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3248                                 _print_next_block(par_num++, "CSEMI");
3249                                 break;
3250                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3251                                 _print_next_block(par_num++, "PXP");
3252                                 break;
3253                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3254                                 _print_next_block(par_num++,
3255                                         "PXPPCICLOCKCLIENT");
3256                                 break;
3257                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3258                                 _print_next_block(par_num++, "CFC");
3259                                 break;
3260                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3261                                 _print_next_block(par_num++, "CDU");
3262                                 break;
3263                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3264                                 _print_next_block(par_num++, "IGU");
3265                                 break;
3266                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3267                                 _print_next_block(par_num++, "MISC");
3268                                 break;
3269                         }
3270
3271                         /* Clear the bit */
3272                         sig &= ~cur_bit;
3273                 }
3274         }
3275
3276         return par_num;
3277 }
3278
3279 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3280 {
3281         int i = 0;
3282         u32 cur_bit = 0;
3283         for (i = 0; sig; i++) {
3284                 cur_bit = ((u32)0x1 << i);
3285                 if (sig & cur_bit) {
3286                         switch (cur_bit) {
3287                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3288                                 _print_next_block(par_num++, "MCP ROM");
3289                                 break;
3290                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3291                                 _print_next_block(par_num++, "MCP UMP RX");
3292                                 break;
3293                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3294                                 _print_next_block(par_num++, "MCP UMP TX");
3295                                 break;
3296                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3297                                 _print_next_block(par_num++, "MCP SCPAD");
3298                                 break;
3299                         }
3300
3301                         /* Clear the bit */
3302                         sig &= ~cur_bit;
3303                 }
3304         }
3305
3306         return par_num;
3307 }
3308
3309 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3310                                      u32 sig2, u32 sig3)
3311 {
3312         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3313             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3314                 int par_num = 0;
3315                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3316                         "[0]:0x%08x [1]:0x%08x "
3317                         "[2]:0x%08x [3]:0x%08x\n",
3318                           sig0 & HW_PRTY_ASSERT_SET_0,
3319                           sig1 & HW_PRTY_ASSERT_SET_1,
3320                           sig2 & HW_PRTY_ASSERT_SET_2,
3321                           sig3 & HW_PRTY_ASSERT_SET_3);
3322                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3323                        bp->dev->name);
3324                 par_num = bnx2x_print_blocks_with_parity0(
3325                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3326                 par_num = bnx2x_print_blocks_with_parity1(
3327                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3328                 par_num = bnx2x_print_blocks_with_parity2(
3329                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3330                 par_num = bnx2x_print_blocks_with_parity3(
3331                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3332                 printk("\n");
3333                 return true;
3334         } else
3335                 return false;
3336 }
3337
3338 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3339 {
3340         struct attn_route attn;
3341         int port = BP_PORT(bp);
3342
3343         attn.sig[0] = REG_RD(bp,
3344                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3345                              port*4);
3346         attn.sig[1] = REG_RD(bp,
3347                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3348                              port*4);
3349         attn.sig[2] = REG_RD(bp,
3350                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3351                              port*4);
3352         attn.sig[3] = REG_RD(bp,
3353                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3354                              port*4);
3355
3356         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3357                                         attn.sig[3]);
3358 }
3359
3360
3361 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3362 {
3363         u32 val;
3364         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3365
3366                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3367                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3368                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3369                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3370                                   "ADDRESS_ERROR\n");
3371                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3372                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373                                   "INCORRECT_RCV_BEHAVIOR\n");
3374                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3375                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3376                                   "WAS_ERROR_ATTN\n");
3377                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3378                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3379                                   "VF_LENGTH_VIOLATION_ATTN\n");
3380                 if (val &
3381                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3382                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383                                   "VF_GRC_SPACE_VIOLATION_ATTN\n");
3384                 if (val &
3385                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3386                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3387                                   "VF_MSIX_BAR_VIOLATION_ATTN\n");
3388                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3389                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3390                                   "TCPL_ERROR_ATTN\n");
3391                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3392                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3393                                   "TCPL_IN_TWO_RCBS_ATTN\n");
3394                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3395                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3396                                   "CSSNOOP_FIFO_OVERFLOW\n");
3397         }
3398         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3399                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3400                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3401                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3402                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3403                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3404                         BNX2X_ERR("ATC_ATC_INT_STS_REG"
3405                                   "_ATC_TCPL_TO_NOT_PEND\n");
3406                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3407                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3408                                   "ATC_GPA_MULTIPLE_HITS\n");
3409                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3410                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3411                                   "ATC_RCPL_TO_EMPTY_CNT\n");
3412                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3413                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3414                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3415                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3416                                   "ATC_IREQ_LESS_THAN_STU\n");
3417         }
3418
3419         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3420                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3421                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3422                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3423                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3424         }
3425
3426 }
3427
3428 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3429 {
3430         struct attn_route attn, *group_mask;
3431         int port = BP_PORT(bp);
3432         int index;
3433         u32 reg_addr;
3434         u32 val;
3435         u32 aeu_mask;
3436
3437         /* need to take HW lock because the MCP or the other port might
3438            also try to handle this event */
3439         bnx2x_acquire_alr(bp);
3440
3441         if (bnx2x_chk_parity_attn(bp)) {
3442                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3443                 bnx2x_set_reset_in_progress(bp);
3444                 schedule_delayed_work(&bp->reset_task, 0);
3445                 /* Disable HW interrupts */
3446                 bnx2x_int_disable(bp);
3447                 bnx2x_release_alr(bp);
3448                 /* In case of parity errors don't handle attentions so that
3449                  * the other function would also "see" the parity errors.
3450                  */
3451                 return;
3452         }
3453
3454         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3455         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3456         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3457         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3458         if (CHIP_IS_E2(bp))
3459                 attn.sig[4] =
3460                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3461         else
3462                 attn.sig[4] = 0;
3463
3464         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3465            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3466
3467         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3468                 if (deasserted & (1 << index)) {
3469                         group_mask = &bp->attn_group[index];
3470
3471                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3472                                          "%08x %08x %08x\n",
3473                            index,
3474                            group_mask->sig[0], group_mask->sig[1],
3475                            group_mask->sig[2], group_mask->sig[3],
3476                            group_mask->sig[4]);
3477
3478                         bnx2x_attn_int_deasserted4(bp,
3479                                         attn.sig[4] & group_mask->sig[4]);
3480                         bnx2x_attn_int_deasserted3(bp,
3481                                         attn.sig[3] & group_mask->sig[3]);
3482                         bnx2x_attn_int_deasserted1(bp,
3483                                         attn.sig[1] & group_mask->sig[1]);
3484                         bnx2x_attn_int_deasserted2(bp,
3485                                         attn.sig[2] & group_mask->sig[2]);
3486                         bnx2x_attn_int_deasserted0(bp,
3487                                         attn.sig[0] & group_mask->sig[0]);
3488                 }
3489         }
3490
3491         bnx2x_release_alr(bp);
3492
3493         if (bp->common.int_block == INT_BLOCK_HC)
3494                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3495                             COMMAND_REG_ATTN_BITS_CLR);
3496         else
3497                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3498
3499         val = ~deasserted;
3500         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3501            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3502         REG_WR(bp, reg_addr, val);
3503
3504         if (~bp->attn_state & deasserted)
3505                 BNX2X_ERR("IGU ERROR\n");
3506
3507         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3508                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3509
3510         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3511         aeu_mask = REG_RD(bp, reg_addr);
3512
3513         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3514            aeu_mask, deasserted);
3515         aeu_mask |= (deasserted & 0x3ff);
3516         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3517
3518         REG_WR(bp, reg_addr, aeu_mask);
3519         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3520
3521         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3522         bp->attn_state &= ~deasserted;
3523         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3524 }
3525
3526 static void bnx2x_attn_int(struct bnx2x *bp)
3527 {
3528         /* read local copy of bits */
3529         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3530                                                                 attn_bits);
3531         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3532                                                                 attn_bits_ack);
3533         u32 attn_state = bp->attn_state;
3534
3535         /* look for changed bits */
3536         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3537         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
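
        /*
         * Editor's example with made-up values: attn_bits = 0b0110,
         * attn_ack = 0b0100 and attn_state = 0b0100 give
         *   asserted   =  0b0110 & ~0b0100 & ~0b0100 = 0b0010
         *   deasserted = ~0b0110 &  0b0100 &  0b0100 = 0b0000
         * i.e. bit 1 was newly raised and nothing was lowered.
         */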
3538
3539         DP(NETIF_MSG_HW,
3540            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3541            attn_bits, attn_ack, asserted, deasserted);
3542
3543         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3544                 BNX2X_ERR("BAD attention state\n");
3545
3546         /* handle bits that were raised */
3547         if (asserted)
3548                 bnx2x_attn_int_asserted(bp, asserted);
3549
3550         if (deasserted)
3551                 bnx2x_attn_int_deasserted(bp, deasserted);
3552 }
3553
3554 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3555 {
3556         /* No memory barriers */
3557         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3558         mmiowb(); /* keep prod updates ordered */
3559 }
3560
3561 #ifdef BCM_CNIC
3562 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3563                                       union event_ring_elem *elem)
3564 {
3565         if (!bp->cnic_eth_dev.starting_cid  ||
3566             cid < bp->cnic_eth_dev.starting_cid)
3567                 return 1;
3568
3569         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3570
3571         if (unlikely(elem->message.data.cfc_del_event.error)) {
3572                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3573                           cid);
3574                 bnx2x_panic_dump(bp);
3575         }
3576         bnx2x_cnic_cfc_comp(bp, cid);
3577         return 0;
3578 }
3579 #endif
3580
3581 static void bnx2x_eq_int(struct bnx2x *bp)
3582 {
3583         u16 hw_cons, sw_cons, sw_prod;
3584         union event_ring_elem *elem;
3585         u32 cid;
3586         u8 opcode;
3587         int spqe_cnt = 0;
3588
3589         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3590
3591         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3592          * When we get the next-page we need to adjust so the loop
3593          * condition below will be met. The next element is the size of a
3594          * regular element and hence we increment by 1.
3595          */
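        /*
         * Editor's example (assuming 256 elements per page with the last one
         * used as a next-page pointer): a raw hw_cons of 255 lands on the
         * next-page element and is bumped to 256 below, keeping it comparable
         * with an sw_cons that NEXT_EQ_IDX() advances past the same element.
         */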
3596         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3597                 hw_cons++;
3598
3599         /* This function may never run in parallel with itself for a
3600          * specific bp, thus there is no need for a "paired" read memory
3601          * barrier here.
3602          */
3603         sw_cons = bp->eq_cons;
3604         sw_prod = bp->eq_prod;
3605
3606         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
3607                         hw_cons, sw_cons, atomic_read(&bp->spq_left));
3608
3609         for (; sw_cons != hw_cons;
3610               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3611
3612
3613                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3614
3615                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3616                 opcode = elem->message.opcode;
3617
3618
3619                 /* handle eq element */
3620                 switch (opcode) {
3621                 case EVENT_RING_OPCODE_STAT_QUERY:
3622                         DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3623                         /* nothing more to do for a statistics completion */
3624                         continue;
3625
3626                 case EVENT_RING_OPCODE_CFC_DEL:
3627                         /* handle according to cid range */
3628                         /*
3629                          * we may want to verify here that the bp state is
3630                          * HALTING
3631                          */
3632                         DP(NETIF_MSG_IFDOWN,
3633                            "got delete ramrod for MULTI[%d]\n", cid);
3634 #ifdef BCM_CNIC
3635                         if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3636                                 goto next_spqe;
3637 #endif
3638                         bnx2x_fp(bp, cid, state) =
3639                                                 BNX2X_FP_STATE_CLOSED;
3640
3641                         goto next_spqe;
3642                 }
3643
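                /*
                 * Editor's note: dispatching on (opcode | bp->state) relies
                 * on the EVENT_RING_OPCODE_* and BNX2X_STATE_* values
                 * occupying disjoint bit ranges, so each case below matches
                 * exactly one opcode/state combination.
                 */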
3644                 switch (opcode | bp->state) {
3645                 case (EVENT_RING_OPCODE_FUNCTION_START |
3646                       BNX2X_STATE_OPENING_WAIT4_PORT):
3647                         DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3648                         bp->state = BNX2X_STATE_FUNC_STARTED;
3649                         break;
3650
3651                 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3652                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3653                         DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3654                         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3655                         break;
3656
3657                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3658                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3659                         DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3660                         bp->set_mac_pending = 0;
3661                         break;
3662
3663                 case (EVENT_RING_OPCODE_SET_MAC |
3664                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3665                         DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3666                         bp->set_mac_pending = 0;
3667                         break;
3668                 default:
3669                         /* unknown event - log an error and continue */
3670                         BNX2X_ERR("Unknown EQ event %d\n",
3671                                   elem->message.opcode);
3672                 }
3673 next_spqe:
3674                 spqe_cnt++;
3675         } /* for */
3676
3677         smp_mb__before_atomic_inc();
3678         atomic_add(spqe_cnt, &bp->spq_left);
3679
3680         bp->eq_cons = sw_cons;
3681         bp->eq_prod = sw_prod;
3682         /* Order the above ring-state writes before the producer update below */
3683         smp_wmb();
3684
3685         /* update producer */
3686         bnx2x_update_eq_prod(bp, bp->eq_prod);
3687 }
3688
3689 static void bnx2x_sp_task(struct work_struct *work)
3690 {
3691         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3692         u16 status;
3693
3694         /* Return here if interrupt is disabled */
3695         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3696                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3697                 return;
3698         }
3699
3700         status = bnx2x_update_dsb_idx(bp);
3701 /*      if (status == 0)                                     */
3702 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3703
3704         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3705
3706         /* HW attentions */
3707         if (status & BNX2X_DEF_SB_ATT_IDX) {
3708                 bnx2x_attn_int(bp);
3709                 status &= ~BNX2X_DEF_SB_ATT_IDX;
3710         }
3711
3712         /* SP events: STAT_QUERY and others */
3713         if (status & BNX2X_DEF_SB_IDX) {
3714
3715                 /* Handle EQ completions */
3716                 bnx2x_eq_int(bp);
3717
3718                 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3719                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3720
3721                 status &= ~BNX2X_DEF_SB_IDX;
3722         }
3723
3724         if (unlikely(status))
3725                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3726                    status);
3727
3728         bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3729              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3730 }
3731
3732 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3733 {
3734         struct net_device *dev = dev_instance;
3735         struct bnx2x *bp = netdev_priv(dev);
3736
3737         /* Return here if interrupt is disabled */
3738         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3739                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3740                 return IRQ_HANDLED;
3741         }
3742
3743         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3744                      IGU_INT_DISABLE, 0);
3745
3746 #ifdef BNX2X_STOP_ON_ERROR
3747         if (unlikely(bp->panic))
3748                 return IRQ_HANDLED;
3749 #endif
3750
3751 #ifdef BCM_CNIC
3752         {
3753                 struct cnic_ops *c_ops;
3754
3755                 rcu_read_lock();
3756                 c_ops = rcu_dereference(bp->cnic_ops);
3757                 if (c_ops)
3758                         c_ops->cnic_handler(bp->cnic_data, NULL);
3759                 rcu_read_unlock();
3760         }
3761 #endif
3762         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3763
3764         return IRQ_HANDLED;
3765 }
3766
3767 /* end of slow path */
3768
3769 static void bnx2x_timer(unsigned long data)
3770 {
3771         struct bnx2x *bp = (struct bnx2x *) data;
3772
3773         if (!netif_running(bp->dev))
3774                 return;
3775
3776         if (atomic_read(&bp->intr_sem) != 0)
3777                 goto timer_restart;
3778
3779         if (poll) {
3780                 struct bnx2x_fastpath *fp = &bp->fp[0];
3781                 int rc;
3782
3783                 bnx2x_tx_int(fp);
3784                 rc = bnx2x_rx_int(fp, 1000);
3785         }
3786
3787         if (!BP_NOMCP(bp)) {
3788                 int mb_idx = BP_FW_MB_IDX(bp);
3789                 u32 drv_pulse;
3790                 u32 mcp_pulse;
3791
3792                 ++bp->fw_drv_pulse_wr_seq;
3793                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3794                 /* TBD - add SYSTEM_TIME */
3795                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3796                 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3797
3798                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3799                              MCP_PULSE_SEQ_MASK);
3800                 /* The delta between driver pulse and mcp response
3801                  * should be 1 (before mcp response) or 0 (after mcp response)
3802                  */
3803                 if ((drv_pulse != mcp_pulse) &&
3804                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3805                         /* someone lost a heartbeat... */
3806                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3807                                   drv_pulse, mcp_pulse);
3808                 }
3809         }
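
        /*
         * Editor's example: after the driver writes sequence 0x10, a healthy
         * MCP reports 0x10 (already responded) or 0x0f (not yet); anything
         * else, e.g. 0x0d, triggers the heartbeat error above.
         */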
3810
3811         if (bp->state == BNX2X_STATE_OPEN)
3812                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3813
3814 timer_restart:
3815         mod_timer(&bp->timer, jiffies + bp->current_interval);
3816 }
3817
3818 /* end of Statistics */
3819
3820 /* nic init */
3821
3822 /*
3823  * nic init service functions
3824  */
3825
3826 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3827 {
3828         u32 i;
3829         if (!(len%4) && !(addr%4))
3830                 for (i = 0; i < len; i += 4)
3831                         REG_WR(bp, addr + i, fill);
3832         else
3833                 for (i = 0; i < len; i++)
3834                         REG_WR8(bp, addr + i, fill);
3835
3836 }
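
/*
 * Editor's note: the byte-wise fallback above writes only the low byte of
 * 'fill' per location, so the two paths produce the same pattern only for
 * values such as the 0 that the callers in this file pass.
 */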
3837
3838 /* helper: writes FP SP data to FW - data_size in dwords */
3839 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3840                                        int fw_sb_id,
3841                                        u32 *sb_data_p,
3842                                        u32 data_size)
3843 {
3844         int index;
3845         for (index = 0; index < data_size; index++)
3846                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3847                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3848                         sizeof(u32)*index,
3849                         *(sb_data_p + index));
3850 }
3851
3852 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3853 {
3854         u32 *sb_data_p;
3855         u32 data_size = 0;
3856         struct hc_status_block_data_e2 sb_data_e2;
3857         struct hc_status_block_data_e1x sb_data_e1x;
3858
3859         /* disable the function first */
3860         if (CHIP_IS_E2(bp)) {
3861                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3862                 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3863                 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3864                 sb_data_e2.common.p_func.vf_valid = false;
3865                 sb_data_p = (u32 *)&sb_data_e2;
3866                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3867         } else {
3868                 memset(&sb_data_e1x, 0,
3869                        sizeof(struct hc_status_block_data_e1x));
3870                 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3871                 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3872                 sb_data_e1x.common.p_func.vf_valid = false;
3873                 sb_data_p = (u32 *)&sb_data_e1x;
3874                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3875         }
3876         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3877
3878         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3879                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3880                         CSTORM_STATUS_BLOCK_SIZE);
3881         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3882                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3883                         CSTORM_SYNC_BLOCK_SIZE);
3884 }
3885
3886 /* helper:  writes SP SB data to FW */
3887 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3888                 struct hc_sp_status_block_data *sp_sb_data)
3889 {
3890         int func = BP_FUNC(bp);
3891         int i;
3892         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3893                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3894                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3895                         i*sizeof(u32),
3896                         *((u32 *)sp_sb_data + i));
3897 }
3898
3899 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3900 {
3901         int func = BP_FUNC(bp);
3902         struct hc_sp_status_block_data sp_sb_data;
3903         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3904
3905         sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3906         sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3907         sp_sb_data.p_func.vf_valid = false;
3908
3909         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3910
3911         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3912                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3913                         CSTORM_SP_STATUS_BLOCK_SIZE);
3914         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3915                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3916                         CSTORM_SP_SYNC_BLOCK_SIZE);
3917
3918 }
3919
3920
3921 static inline
3922 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3923                                            int igu_sb_id, int igu_seg_id)
3924 {
3925         hc_sm->igu_sb_id = igu_sb_id;
3926         hc_sm->igu_seg_id = igu_seg_id;
3927         hc_sm->timer_value = 0xFF;
3928         hc_sm->time_to_expire = 0xFFFFFFFF;
3929 }
3930
3931 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3932                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
3933 {
3934         int igu_seg_id;
3935
3936         struct hc_status_block_data_e2 sb_data_e2;
3937         struct hc_status_block_data_e1x sb_data_e1x;
3938         struct hc_status_block_sm  *hc_sm_p;
3939         struct hc_index_data *hc_index_p;
3940         int data_size;
3941         u32 *sb_data_p;
3942
3943         if (CHIP_INT_MODE_IS_BC(bp))
3944                 igu_seg_id = HC_SEG_ACCESS_NORM;
3945         else
3946                 igu_seg_id = IGU_SEG_ACCESS_NORM;
3947
3948         bnx2x_zero_fp_sb(bp, fw_sb_id);
3949
3950         if (CHIP_IS_E2(bp)) {
3951                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3952                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3953                 sb_data_e2.common.p_func.vf_id = vfid;
3954                 sb_data_e2.common.p_func.vf_valid = vf_valid;
3955                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3956                 sb_data_e2.common.same_igu_sb_1b = true;
3957                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3958                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3959                 hc_sm_p = sb_data_e2.common.state_machine;
3960                 hc_index_p = sb_data_e2.index_data;
3961                 sb_data_p = (u32 *)&sb_data_e2;
3962                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3963         } else {
3964                 memset(&sb_data_e1x, 0,
3965                        sizeof(struct hc_status_block_data_e1x));
3966                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3967                 sb_data_e1x.common.p_func.vf_id = 0xff;
3968                 sb_data_e1x.common.p_func.vf_valid = false;
3969                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3970                 sb_data_e1x.common.same_igu_sb_1b = true;
3971                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3972                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3973                 hc_sm_p = sb_data_e1x.common.state_machine;
3974                 hc_index_p = sb_data_e1x.index_data;
3975                 sb_data_p = (u32 *)&sb_data_e1x;
3976                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3977         }
3978
3979         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3980                                        igu_sb_id, igu_seg_id);
3981         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3982                                        igu_sb_id, igu_seg_id);
3983
3984         DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3985
3986         /* write indices to HW */
3987         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3988 }
3989
3990 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3991                                         u8 sb_index, u8 disable, u16 usec)
3992 {
3993         int port = BP_PORT(bp);
3994         u8 ticks = usec / BNX2X_BTR;
3995
3996         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3997
3998         disable = disable ? 1 : (usec ? 0 : 1);
3999         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4000 }
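
/*
 * Editor's example (taking BNX2X_BTR as 4 usec per tick - an assumption
 * about a constant defined elsewhere): usec = 100 yields ticks = 25, and
 * usec = 0 disables the index even when 'disable' is false.
 */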
4001
4002 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4003                                      u16 tx_usec, u16 rx_usec)
4004 {
4005         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4006                                     false, rx_usec);
4007         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4008                                     false, tx_usec);
4009 }
4010
4011 static void bnx2x_init_def_sb(struct bnx2x *bp)
4012 {
4013         struct host_sp_status_block *def_sb = bp->def_status_blk;
4014         dma_addr_t mapping = bp->def_status_blk_mapping;
4015         int igu_sp_sb_index;
4016         int igu_seg_id;
4017         int port = BP_PORT(bp);
4018         int func = BP_FUNC(bp);
4019         int reg_offset;
4020         u64 section;
4021         int index;
4022         struct hc_sp_status_block_data sp_sb_data;
4023         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4024
4025         if (CHIP_INT_MODE_IS_BC(bp)) {
4026                 igu_sp_sb_index = DEF_SB_IGU_ID;
4027                 igu_seg_id = HC_SEG_ACCESS_DEF;
4028         } else {
4029                 igu_sp_sb_index = bp->igu_dsb_id;
4030                 igu_seg_id = IGU_SEG_ACCESS_DEF;
4031         }
4032
4033         /* ATTN */
4034         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4035                                             atten_status_block);
4036         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4037
4038         bp->attn_state = 0;
4039
4040         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4041                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4042         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4043                 int sindex;
4044                 /* take care of sig[0]..sig[3]; sig[4] is handled below */
4045                 for (sindex = 0; sindex < 4; sindex++)
4046                         bp->attn_group[index].sig[sindex] =
4047                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4048
4049                 if (CHIP_IS_E2(bp))
4050                         /*
4051                          * enable5 is separate from the rest of the registers,
4052                          * and therefore the address skip is 4
4053                          * and not 16 between the different groups
4054                          */
4055                         bp->attn_group[index].sig[4] = REG_RD(bp,
4056                                         reg_offset + 0x10 + 0x4*index);
4057                 else
4058                         bp->attn_group[index].sig[4] = 0;
4059         }
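
        /*
         * Editor's example: for attention group 2 the four regular masks are
         * read from reg_offset + 0x20..0x2c (stride 0x10 per group), while
         * its enable5 mask sits at reg_offset + 0x18 (stride 4 per group),
         * matching the comment above.
         */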
4060
4061         if (bp->common.int_block == INT_BLOCK_HC) {
4062                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4063                                      HC_REG_ATTN_MSG0_ADDR_L);
4064
4065                 REG_WR(bp, reg_offset, U64_LO(section));
4066                 REG_WR(bp, reg_offset + 4, U64_HI(section));
4067         } else if (CHIP_IS_E2(bp)) {
4068                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4069                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4070         }
4071
4072         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4073                                             sp_sb);
4074
4075         bnx2x_zero_sp_sb(bp);
4076
4077         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
4078         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
4079         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
4080         sp_sb_data.igu_seg_id           = igu_seg_id;
4081         sp_sb_data.p_func.pf_id         = func;
4082         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
4083         sp_sb_data.p_func.vf_id         = 0xff;
4084
4085         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4086
4087         bp->stats_pending = 0;
4088         bp->set_mac_pending = 0;
4089
4090         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4091 }
4092
4093 void bnx2x_update_coalesce(struct bnx2x *bp)
4094 {
4095         int i;
4096
4097         for_each_queue(bp, i)
4098                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4099                                          bp->rx_ticks, bp->tx_ticks);
4100 }
4101
4102 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4103 {
4104         spin_lock_init(&bp->spq_lock);
4105         atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4106
4107         bp->spq_prod_idx = 0;
4108         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4109         bp->spq_prod_bd = bp->spq;
4110         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4111 }
4112
4113 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4114 {
4115         int i;
4116         for (i = 1; i <= NUM_EQ_PAGES; i++) {
4117                 union event_ring_elem *elem =
4118                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4119
4120                 elem->next_page.addr.hi =
4121                         cpu_to_le32(U64_HI(bp->eq_mapping +
4122                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4123                 elem->next_page.addr.lo =
4124                         cpu_to_le32(U64_LO(bp->eq_mapping +
4125                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4126         }
4127         bp->eq_cons = 0;
4128         bp->eq_prod = NUM_EQ_DESC;
4129         bp->eq_cons_sb = BNX2X_EQ_INDEX;
4130 }
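
/*
 * Illustrative sketch, not driver code: the loop above chains the event
 * ring pages into a circle - the descriptor at the end of page (i - 1)
 * receives the DMA address of page (i % NUM_EQ_PAGES), so every page
 * points at its successor and the last one wraps back to page 0.  The
 * hypothetical helper below merely restates that address arithmetic.
 */
static inline dma_addr_t bnx2x_eq_next_page_sketch(dma_addr_t eq_mapping,
                                                   int i)
{
        /* i is 1-based, exactly as in the loop in bnx2x_init_eq_ring() */
        return eq_mapping + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES);
}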
4131
4132 static void bnx2x_init_ind_table(struct bnx2x *bp)
4133 {
4134         int func = BP_FUNC(bp);
4135         int i;
4136
4137         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4138                 return;
4139
4140         DP(NETIF_MSG_IFUP,
4141            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4142         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4143                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4144                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4145                         bp->fp->cl_id + (i % bp->num_queues));
4146 }
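
/*
 * Sketch only: the loop above fills the indirection table round-robin,
 * so with e.g. 4 queues and base client id C the entries read
 * C, C+1, C+2, C+3, C, C+1, ...  A hypothetical helper computing the
 * value written per entry:
 */
static inline u8 bnx2x_ind_entry_sketch(u8 base_cl_id, int num_queues, int i)
{
        return base_cl_id + (i % num_queues);
}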
4147
4148 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4149 {
4150         int mode = bp->rx_mode;
4151         u16 cl_id;
4152
4153         /* All but management unicast packets should pass to the host as well */
4154         u32 llh_mask =
4155                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4156                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4157                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4158                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4159
4160         switch (mode) {
4161         case BNX2X_RX_MODE_NONE: /* no Rx */
4162                 cl_id = BP_L_ID(bp);
4163                 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4164                 break;
4165
4166         case BNX2X_RX_MODE_NORMAL:
4167                 cl_id = BP_L_ID(bp);
4168                 bnx2x_rxq_set_mac_filters(bp, cl_id,
4169                         BNX2X_ACCEPT_UNICAST |
4170                         BNX2X_ACCEPT_BROADCAST |
4171                         BNX2X_ACCEPT_MULTICAST);
4172                 break;
4173
4174         case BNX2X_RX_MODE_ALLMULTI:
4175                 cl_id = BP_L_ID(bp);
4176                 bnx2x_rxq_set_mac_filters(bp, cl_id,
4177                         BNX2X_ACCEPT_UNICAST |
4178                         BNX2X_ACCEPT_BROADCAST |
4179                         BNX2X_ACCEPT_ALL_MULTICAST);
4180                 break;
4181
4182         case BNX2X_RX_MODE_PROMISC:
4183                 cl_id = BP_L_ID(bp);
4184                 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4185
4186                 /* pass management unicast packets as well */
4187                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4188                 break;
4189
4190         default:
4191                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4192                 break;
4193         }
4194
4195         REG_WR(bp,
4196                BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4197                              NIG_REG_LLH0_BRB1_DRV_MASK,
4198                llh_mask);
4199
4200         DP(NETIF_MSG_IFUP, "rx mode %d\n"
4201                 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4202                 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4203                 bp->mac_filters.ucast_drop_all,
4204                 bp->mac_filters.mcast_drop_all,
4205                 bp->mac_filters.bcast_drop_all,
4206                 bp->mac_filters.ucast_accept_all,
4207                 bp->mac_filters.mcast_accept_all,
4208                 bp->mac_filters.bcast_accept_all
4209         );
4210
4211         storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4212 }
4213
4214 static void bnx2x_init_internal_common(struct bnx2x *bp)
4215 {
4216         int i;
4217
4218         if (!CHIP_IS_E1(bp)) {
4219
4220                 /* xstorm needs to know whether to add ovlan to packets or not;
4221                  * in switch-independent mode we'll write 0 here... */
4222                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4223                         bp->mf_mode);
4224                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4225                         bp->mf_mode);
4226                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4227                         bp->mf_mode);
4228                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4229                         bp->mf_mode);
4230         }
4231
4232         /* Zero this manually as its initialization is
4233            currently missing in the initTool */
4234         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4235                 REG_WR(bp, BAR_USTRORM_INTMEM +
4236                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4237         if (CHIP_IS_E2(bp)) {
4238                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4239                         CHIP_INT_MODE_IS_BC(bp) ?
4240                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4241         }
4242 }
4243
4244 static void bnx2x_init_internal_port(struct bnx2x *bp)
4245 {
4246         /* port */
4247 }
4248
4249 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4250 {
4251         switch (load_code) {
4252         case FW_MSG_CODE_DRV_LOAD_COMMON:
4253         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4254                 bnx2x_init_internal_common(bp);
4255                 /* no break */
4256
4257         case FW_MSG_CODE_DRV_LOAD_PORT:
4258                 bnx2x_init_internal_port(bp);
4259                 /* no break */
4260
4261         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4262                 /* internal memory per function is
4263                    initialized inside bnx2x_pf_init */
4264                 break;
4265
4266         default:
4267                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4268                 break;
4269         }
4270 }
4271
4272 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4273 {
4274         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4275
4276         fp->state = BNX2X_FP_STATE_CLOSED;
4277
4278         fp->index = fp->cid = fp_idx;
4279         fp->cl_id = BP_L_ID(bp) + fp_idx;
4280         fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4281         fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4282         /* qZone id equals the FW (per path) client id */
4283         fp->cl_qzone_id  = fp->cl_id +
4284                            BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4285                                 ETH_MAX_RX_CLIENTS_E1H);
4286         /* init shortcut */
4287         fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4288                             USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4289                             USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4290         /* Setup SB indices */
4291         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4292         fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4293
4294         DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
4295                                    "cl_id %d  fw_sb %d  igu_sb %d\n",
4296                    fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4297                    fp->igu_sb_id);
4298         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4299                       fp->fw_sb_id, fp->igu_sb_id);
4300
4301         bnx2x_update_fpsb_idx(fp);
4302 }
4303
4304 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4305 {
4306         int i;
4307
4308         for_each_queue(bp, i)
4309                 bnx2x_init_fp_sb(bp, i);
4310 #ifdef BCM_CNIC
4311
4312         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4313                       BNX2X_VF_ID_INVALID, false,
4314                       CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4315
4316 #endif
4317
4318         /* ensure status block indices were read */
4319         rmb();
4320
4321         bnx2x_init_def_sb(bp);
4322         bnx2x_update_dsb_idx(bp);
4323         bnx2x_init_rx_rings(bp);
4324         bnx2x_init_tx_rings(bp);
4325         bnx2x_init_sp_ring(bp);
4326         bnx2x_init_eq_ring(bp);
4327         bnx2x_init_internal(bp, load_code);
4328         bnx2x_pf_init(bp);
4329         bnx2x_init_ind_table(bp);
4330         bnx2x_stats_init(bp);
4331
4332         /* At this point, we are ready for interrupts */
4333         atomic_set(&bp->intr_sem, 0);
4334
4335         /* flush all before enabling interrupts */
4336         mb();
4337         mmiowb();
4338
4339         bnx2x_int_enable(bp);
4340
4341         /* Check for SPIO5 */
4342         bnx2x_attn_int_deasserted0(bp,
4343                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4344                                    AEU_INPUTS_ATTN_BITS_SPIO5);
4345 }
4346
4347 /* end of nic init */
4348
4349 /*
4350  * gzip service functions
4351  */
4352
4353 static int bnx2x_gunzip_init(struct bnx2x *bp)
4354 {
4355         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4356                                             &bp->gunzip_mapping, GFP_KERNEL);
4357         if (bp->gunzip_buf == NULL)
4358                 goto gunzip_nomem1;
4359
4360         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4361         if (bp->strm == NULL)
4362                 goto gunzip_nomem2;
4363
4364         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4365                                       GFP_KERNEL);
4366         if (bp->strm->workspace == NULL)
4367                 goto gunzip_nomem3;
4368
4369         return 0;
4370
4371 gunzip_nomem3:
4372         kfree(bp->strm);
4373         bp->strm = NULL;
4374
4375 gunzip_nomem2:
4376         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4377                           bp->gunzip_mapping);
4378         bp->gunzip_buf = NULL;
4379
4380 gunzip_nomem1:
4381         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4382                " decompression\n");
4383         return -ENOMEM;
4384 }
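
/*
 * Descriptive note, not driver code: the error path above is the usual
 * kernel goto-unwind idiom - each label releases what was allocated
 * before the failing step, in reverse order.  A failure at the workspace
 * allocation, for instance, falls through gunzip_nomem3 (free the
 * stream) and gunzip_nomem2 (free the DMA buffer) before returning.
 */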
4385
4386 static void bnx2x_gunzip_end(struct bnx2x *bp)
4387 {
4388         kfree(bp->strm->workspace);
4389         kfree(bp->strm);
4390         bp->strm = NULL;
4391
4392         if (bp->gunzip_buf) {
4393                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4394                                   bp->gunzip_mapping);
4395                 bp->gunzip_buf = NULL;
4396         }
4397 }
4398
4399 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4400 {
4401         int n, rc;
4402
4403         /* check gzip header */
4404         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4405                 BNX2X_ERR("Bad gzip header\n");
4406                 return -EINVAL;
4407         }
4408
4409         n = 10;
4410
4411 #define FNAME                           0x8
4412
4413         if (zbuf[3] & FNAME)
4414                 while ((n < len) && (zbuf[n++] != 0));
4415
4416         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4417         bp->strm->avail_in = len - n;
4418         bp->strm->next_out = bp->gunzip_buf;
4419         bp->strm->avail_out = FW_BUF_SIZE;
4420
4421         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4422         if (rc != Z_OK)
4423                 return rc;
4424
4425         rc = zlib_inflate(bp->strm, Z_FINISH);
4426         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4427                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4428                            bp->strm->msg);
4429
4430         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4431         if (bp->gunzip_outlen & 0x3)
4432                 netdev_err(bp->dev, "Firmware decompression error:"
4433                                     " gunzip_outlen (%d) not aligned\n",
4434                                 bp->gunzip_outlen);
4435         bp->gunzip_outlen >>= 2;
4436
4437         zlib_inflateEnd(bp->strm);
4438
4439         if (rc == Z_STREAM_END)
4440                 return 0;
4441
4442         return rc;
4443 }
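
/*
 * Sketch with stated assumptions: the negative windowBits (-MAX_WBITS)
 * passed to zlib_inflateInit2() above selects a raw DEFLATE stream, so
 * the fixed 10-byte gzip header (magic 0x1f 0x8b, method Z_DEFLATED) and
 * the optional NUL-terminated FNAME field have to be skipped by hand,
 * as the parsing above does.  The same parse as a hypothetical
 * stand-alone helper:
 */
static inline int bnx2x_gzip_data_offset_sketch(const u8 *zbuf, int len)
{
        int n = 10;                             /* fixed gzip header */

        if ((len < n) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) ||
            (zbuf[2] != Z_DEFLATED))
                return -EINVAL;

        if (zbuf[3] & FNAME)                    /* skip original file name */
                while ((n < len) && (zbuf[n++] != 0));

        return n;               /* offset of the raw deflate data */
}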
4444
4445 /* nic load/unload */
4446
4447 /*
4448  * General service functions
4449  */
4450
4451 /* send a NIG loopback debug packet */
4452 static void bnx2x_lb_pckt(struct bnx2x *bp)
4453 {
4454         u32 wb_write[3];
4455
4456         /* Ethernet source and destination addresses */
4457         wb_write[0] = 0x55555555;
4458         wb_write[1] = 0x55555555;
4459         wb_write[2] = 0x20;             /* SOP */
4460         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4461
4462         /* NON-IP protocol */
4463         wb_write[0] = 0x09000000;
4464         wb_write[1] = 0x55555555;
4465         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4466         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4467 }
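
/*
 * Summary of the debug packet above, derived from the inline comments
 * only: each REG_WR_DMAE() pushes 8 bytes of frame data (wb_write[0..1])
 * plus a control word (wb_write[2]) where 0x20 marks start-of-packet and
 * 0x10 marks end-of-packet with eop_bvalid cleared, so the two writes
 * emit one minimal 16-byte loopback frame.
 */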
4468
4469 /* some of the internal memories
4470  * are not directly readable from the driver;
4471  * to test them we send debug packets
4472  */
4473 static int bnx2x_int_mem_test(struct bnx2x *bp)
4474 {
4475         int factor;
4476         int count, i;
4477         u32 val = 0;
4478
4479         if (CHIP_REV_IS_FPGA(bp))
4480                 factor = 120;
4481         else if (CHIP_REV_IS_EMUL(bp))
4482                 factor = 200;
4483         else
4484                 factor = 1;
4485
4486         /* Disable inputs of parser neighbor blocks */
4487         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4488         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4489         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4490         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4491
4492         /*  Write 0 to parser credits for CFC search request */
4493         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4494
4495         /* send Ethernet packet */
4496         bnx2x_lb_pckt(bp);
4497
4498         /* TODO: do I reset the NIG statistic? */
4499         /* Wait until NIG register shows 1 packet of size 0x10 */
4500         count = 1000 * factor;
4501         while (count) {
4502
4503                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4504                 val = *bnx2x_sp(bp, wb_data[0]);
4505                 if (val == 0x10)
4506                         break;
4507
4508                 msleep(10);
4509                 count--;
4510         }
4511         if (val != 0x10) {
4512                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4513                 return -1;
4514         }
4515
4516         /* Wait until PRS register shows 1 packet */
4517         count = 1000 * factor;
4518         while (count) {
4519                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4520                 if (val == 1)
4521                         break;
4522
4523                 msleep(10);
4524                 count--;
4525         }
4526         if (val != 0x1) {
4527                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4528                 return -2;
4529         }
4530
4531         /* Reset and init BRB, PRS */
4532         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4533         msleep(50);
4534         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4535         msleep(50);
4536         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4537         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4538
4539         DP(NETIF_MSG_HW, "part2\n");
4540
4541         /* Disable inputs of parser neighbor blocks */
4542         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4543         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4544         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4545         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4546
4547         /* Write 0 to parser credits for CFC search request */
4548         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4549
4550         /* send 10 Ethernet packets */
4551         for (i = 0; i < 10; i++)
4552                 bnx2x_lb_pckt(bp);
4553
4554         /* Wait until NIG register shows 10 + 1
4555            packets of size 11*0x10 = 0xb0 */
4556         count = 1000 * factor;
4557         while (count) {
4558
4559                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4560                 val = *bnx2x_sp(bp, wb_data[0]);
4561                 if (val == 0xb0)
4562                         break;
4563
4564                 msleep(10);
4565                 count--;
4566         }
4567         if (val != 0xb0) {
4568                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4569                 return -3;
4570         }
4571
4572         /* Wait until PRS register shows 2 packets */
4573         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4574         if (val != 2)
4575                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4576
4577         /* Write 1 to parser credits for CFC search request */
4578         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4579
4580         /* Wait until PRS register shows 3 packets */
4581         msleep(10 * factor);
4582         /* the PRS register should show 3 packets by now */
4583         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4584         if (val != 3)
4585                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4586
4587         /* clear NIG EOP FIFO */
4588         for (i = 0; i < 11; i++)
4589                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4590         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4591         if (val != 1) {
4592                 BNX2X_ERR("clear of NIG failed\n");
4593                 return -4;
4594         }
4595
4596         /* Reset and init BRB, PRS, NIG */
4597         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4598         msleep(50);
4599         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4600         msleep(50);
4601         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4602         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4603 #ifndef BCM_CNIC
4604         /* set NIC mode */
4605         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4606 #endif
4607
4608         /* Enable inputs of parser neighbor blocks */
4609         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4610         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4611         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4612         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4613
4614         DP(NETIF_MSG_HW, "done\n");
4615
4616         return 0; /* OK */
4617 }
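
/*
 * The NIG/PRS wait loops above share one poll-with-timeout shape; a
 * generic restatement as a sketch (the driver's real helper for this
 * pattern is reg_poll(), used during CFC init further below):
 */
static inline u32 bnx2x_poll_reg_sketch(struct bnx2x *bp, u32 reg,
                                        u32 expected, int count, int wait_ms)
{
        u32 val = REG_RD(bp, reg);

        while (count-- && (val != expected)) {
                msleep(wait_ms);
                val = REG_RD(bp, reg);
        }
        return val;             /* caller compares against expected */
}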
4618
4619 static void enable_blocks_attention(struct bnx2x *bp)
4620 {
4621         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4622         if (CHIP_IS_E2(bp))
4623                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4624         else
4625                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4626         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4627         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4628         /*
4629          * mask read length error interrupts in brb for parser
4630          * (parsing unit and 'checksum and crc' unit)
4631          * these errors are legal (PU reads fixed length and CAC can cause
4632          * read length error on truncated packets)
4633          */
4634         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4635         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4636         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4637         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4638         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4639         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4640 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4641 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4642         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4643         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4644         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4645 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4646 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4647         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4648         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4649         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4650         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4651 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4652 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4653
4654         if (CHIP_REV_IS_FPGA(bp))
4655                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4656         else if (CHIP_IS_E2(bp))
4657                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4658                            (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4659                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4660                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4661                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4662                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4663         else
4664                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4665         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4666         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4667         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4668 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4669 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4670         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4671         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4672 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4673         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
4674 }
4675
4676 static const struct {
4677         u32 addr;
4678         u32 mask;
4679 } bnx2x_parity_mask[] = {
4680         {PXP_REG_PXP_PRTY_MASK,         0x3ffffff},
4681         {PXP2_REG_PXP2_PRTY_MASK_0,     0xffffffff},
4682         {PXP2_REG_PXP2_PRTY_MASK_1,     0x7f},
4683         {HC_REG_HC_PRTY_MASK,           0x7},
4684         {MISC_REG_MISC_PRTY_MASK,       0x1},
4685         {QM_REG_QM_PRTY_MASK,           0x0},
4686         {DORQ_REG_DORQ_PRTY_MASK,       0x0},
4687         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4688         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4689         {SRC_REG_SRC_PRTY_MASK,         0x4}, /* bit 2 */
4690         {CDU_REG_CDU_PRTY_MASK,         0x0},
4691         {CFC_REG_CFC_PRTY_MASK,         0x0},
4692         {DBG_REG_DBG_PRTY_MASK,         0x0},
4693         {DMAE_REG_DMAE_PRTY_MASK,       0x0},
4694         {BRB1_REG_BRB1_PRTY_MASK,       0x0},
4695         {PRS_REG_PRS_PRTY_MASK,         (1<<6)},/* bit 6 */
4696         {TSDM_REG_TSDM_PRTY_MASK,       0x18},  /* bits 3,4 */
4697         {CSDM_REG_CSDM_PRTY_MASK,       0x8},   /* bit 3 */
4698         {USDM_REG_USDM_PRTY_MASK,       0x38},  /* bits 3,4,5 */
4699         {XSDM_REG_XSDM_PRTY_MASK,       0x8},   /* bit 3 */
4700         {TSEM_REG_TSEM_PRTY_MASK_0,     0x0},
4701         {TSEM_REG_TSEM_PRTY_MASK_1,     0x0},
4702         {USEM_REG_USEM_PRTY_MASK_0,     0x0},
4703         {USEM_REG_USEM_PRTY_MASK_1,     0x0},
4704         {CSEM_REG_CSEM_PRTY_MASK_0,     0x0},
4705         {CSEM_REG_CSEM_PRTY_MASK_1,     0x0},
4706         {XSEM_REG_XSEM_PRTY_MASK_0,     0x0},
4707         {XSEM_REG_XSEM_PRTY_MASK_1,     0x0}
4708 };
4709
4710 static void enable_blocks_parity(struct bnx2x *bp)
4711 {
4712         int i;
4713
4714         for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4715                 REG_WR(bp, bnx2x_parity_mask[i].addr,
4716                         bnx2x_parity_mask[i].mask);
4717 }
4718
4719
4720 static void bnx2x_reset_common(struct bnx2x *bp)
4721 {
4722         /* reset_common */
4723         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4724                0xd3ffff7f);
4725         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4726 }
4727
4728 static void bnx2x_init_pxp(struct bnx2x *bp)
4729 {
4730         u16 devctl;
4731         int r_order, w_order;
4732
4733         pci_read_config_word(bp->pdev,
4734                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4735         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4736         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4737         if (bp->mrrs == -1)
4738                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4739         else {
4740                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4741                 r_order = bp->mrrs;
4742         }
4743
4744         bnx2x_init_pxp_arb(bp, r_order, w_order);
4745 }
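
/*
 * Background for the field extraction above (sketch): PCI_EXP_DEVCTL
 * holds the max payload size in bits 7:5 and the max read request size
 * in bits 14:12, each encoding a byte count as 128 << order (order 0 is
 * 128 bytes, order 2 is 512 bytes).  Hypothetical helper:
 */
static inline int bnx2x_mps_to_bytes_sketch(u16 devctl)
{
        int w_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

        return 128 << w_order;
}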
4746
4747 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4748 {
4749         int is_required;
4750         u32 val;
4751         int port;
4752
4753         if (BP_NOMCP(bp))
4754                 return;
4755
4756         is_required = 0;
4757         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4758               SHARED_HW_CFG_FAN_FAILURE_MASK;
4759
4760         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4761                 is_required = 1;
4762
4763         /*
4764          * The fan failure mechanism is usually related to the PHY type since
4765          * the power consumption of the board is affected by the PHY. Currently,
4766          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4767          */
4768         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4769                 for (port = PORT_0; port < PORT_MAX; port++) {
4770                         is_required |=
4771                                 bnx2x_fan_failure_det_req(
4772                                         bp,
4773                                         bp->common.shmem_base,
4774                                         bp->common.shmem2_base,
4775                                         port);
4776                 }
4777
4778         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4779
4780         if (is_required == 0)
4781                 return;
4782
4783         /* Fan failure is indicated by SPIO 5 */
4784         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4785                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
4786
4787         /* set to active low mode */
4788         val = REG_RD(bp, MISC_REG_SPIO_INT);
4789         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4790                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4791         REG_WR(bp, MISC_REG_SPIO_INT, val);
4792
4793         /* enable interrupt to signal the IGU */
4794         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4795         val |= (1 << MISC_REGISTERS_SPIO_5);
4796         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4797 }
4798
4799 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4800 {
4801         u32 offset = 0;
4802
4803         if (CHIP_IS_E1(bp))
4804                 return;
4805         if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4806                 return;
4807
4808         switch (BP_ABS_FUNC(bp)) {
4809         case 0:
4810                 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4811                 break;
4812         case 1:
4813                 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4814                 break;
4815         case 2:
4816                 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4817                 break;
4818         case 3:
4819                 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4820                 break;
4821         case 4:
4822                 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4823                 break;
4824         case 5:
4825                 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4826                 break;
4827         case 6:
4828                 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4829                 break;
4830         case 7:
4831                 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4832                 break;
4833         default:
4834                 return;
4835         }
4836
4837         REG_WR(bp, offset, pretend_func_num);
4838         REG_RD(bp, offset);
4839         DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4840 }
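
/*
 * Typical pretend usage, a sketch of the pattern used later in this
 * file: GRC accesses issued between the two calls are attributed to the
 * pretended function, and the read-back above flushes the write before
 * any such access happens:
 *
 *      bnx2x_pretend_func(bp, other_func);
 *      ... per-function register accesses ...
 *      bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));        restore self
 */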
4841
4842 static void bnx2x_pf_disable(struct bnx2x *bp)
4843 {
4844         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4845         val &= ~IGU_PF_CONF_FUNC_EN;
4846
4847         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4848         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4849         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4850 }
4851
4852 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4853 {
4854         u32 val, i;
4855
4856         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
4857
4858         bnx2x_reset_common(bp);
4859         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4860         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4861
4862         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4863         if (!CHIP_IS_E1(bp))
4864                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4865
4866         if (CHIP_IS_E2(bp)) {
4867                 u8 fid;
4868
4869                 /*
4870                  * In 4-port or 2-port mode we need to turn off master-enable
4871                  * for everyone; after that, turn it back on for self.
4872                  * So, multi-function or not, we always disable it for all
4873                  * functions on the given path, which means 0,2,4,6 for
4874                  * path 0 and 1,3,5,7 for path 1.
4875                  */
4876                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4877                         if (fid == BP_ABS_FUNC(bp)) {
4878                                 REG_WR(bp,
4879                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4880                                     1);
4881                                 continue;
4882                         }
4883
4884                         bnx2x_pretend_func(bp, fid);
4885                         /* clear pf enable */
4886                         bnx2x_pf_disable(bp);
4887                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4888                 }
4889         }
4890
4891         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4892         if (CHIP_IS_E1(bp)) {
4893                 /* enable HW interrupt from PXP on USDM overflow
4894                    bit 16 on INT_MASK_0 */
4895                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4896         }
4897
4898         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
4899         bnx2x_init_pxp(bp);
4900
4901 #ifdef __BIG_ENDIAN
4902         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4903         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4904         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4905         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4906         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4907         /* make sure this value is 0 */
4908         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4909
4910 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4911         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4912         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4913         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4914         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4915 #endif
4916
4917         bnx2x_ilt_init_page_size(bp, INITOP_SET);
4918
4919         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4920                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4921
4922         /* let the HW do its magic ... */
4923         msleep(100);
4924         /* finish PXP init */
4925         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4926         if (val != 1) {
4927                 BNX2X_ERR("PXP2 CFG failed\n");
4928                 return -EBUSY;
4929         }
4930         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4931         if (val != 1) {
4932                 BNX2X_ERR("PXP2 RD_INIT failed\n");
4933                 return -EBUSY;
4934         }
4935
4936         /* Timers bug workaround, E2 only. We need to set the entire ILT to
4937          * have entries with value "0" and valid bit on.
4938          * This needs to be done by the first PF that is loaded in a path
4939          * (i.e. common phase)
4940          */
4941         if (CHIP_IS_E2(bp)) {
4942                 struct ilt_client_info ilt_cli;
4943                 struct bnx2x_ilt ilt;
4944                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4945                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4946
4947                 /* initialize dummy TM client */
4948                 ilt_cli.start = 0;
4949                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4950                 ilt_cli.client_num = ILT_CLIENT_TM;
4951
4952                 /* Step 1: set zeroes to all ilt page entries with valid bit on
4953                  * Step 2: set the timers first/last ilt entry to point
4954                  * to the entire range to prevent ILT range error for 3rd/4th
4955                  * vnic (this code assumes existence of the vnic)
4956                  *
4957                  * Both steps are performed by the call to
4958                  * bnx2x_ilt_client_init_op() with the dummy TM client.
4959                  *
4960                  * We must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4961                  * and its counterpart are split registers.
4962                  */
4963                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4964                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4965                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4966
4967                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4968                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4969                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4970         }
4971
4972
4973         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4974         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4975
4976         if (CHIP_IS_E2(bp)) {
4977                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4978                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4979                 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4980
4981                 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4982
4983                 /* let the HW do its magic ... */
4984                 do {
4985                         msleep(200);
4986                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4987                 } while (factor-- && (val != 1));
4988
4989                 if (val != 1) {
4990                         BNX2X_ERR("ATC_INIT failed\n");
4991                         return -EBUSY;
4992                 }
4993         }
4994
4995         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4996
4997         /* clean the DMAE memory */
4998         bp->dmae_ready = 1;
4999         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5000
5001         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5002         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5003         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5004         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5005
5006         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5007         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5008         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5009         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5010
5011         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5012
5013         if (CHIP_MODE_IS_4_PORT(bp))
5014                 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5015
5016         /* QM queues pointers table */
5017         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5018
5019         /* soft reset pulse */
5020         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5021         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5022
5023 #ifdef BCM_CNIC
5024         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5025 #endif
5026
5027         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5028         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5029
5030         if (!CHIP_REV_IS_SLOW(bp)) {
5031                 /* enable hw interrupt from doorbell Q */
5032                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5033         }
5034
5035         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5036         if (CHIP_MODE_IS_4_PORT(bp)) {
5037                 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5038                 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5039         }
5040
5041         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5042         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5043 #ifndef BCM_CNIC
5044         /* set NIC mode */
5045         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5046 #endif
5047         if (!CHIP_IS_E1(bp))
5048                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5049
5050         if (CHIP_IS_E2(bp)) {
5051                 /* Bit-map indicating which L2 hdrs may appear after the
5052                    basic Ethernet header */
5053                 int has_ovlan = IS_MF(bp);
5054                 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5055                 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5056         }
5057
5058         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5059         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5060         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5061         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5062
5063         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5064         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5065         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5066         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5067
5068         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5069         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5070         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5071         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5072
5073         if (CHIP_MODE_IS_4_PORT(bp))
5074                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5075
5076         /* sync semi rtc */
5077         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5078                0x80000000);
5079         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5080                0x80000000);
5081
5082         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5083         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5084         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5085
5086         if (CHIP_IS_E2(bp)) {
5087                 int has_ovlan = IS_MF(bp);
5088                 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5089                 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5090         }
5091
5092         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5093         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5094                 REG_WR(bp, i, random32());
5095
5096         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5097 #ifdef BCM_CNIC
5098         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5099         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5100         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5101         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5102         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5103         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5104         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5105         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5106         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5107         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5108 #endif
5109         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5110
5111         if (sizeof(union cdu_context) != 1024)
5112                 /* we currently assume that a context is 1024 bytes */
5113                 dev_alert(&bp->pdev->dev, "please adjust the size "
5114                                           "of cdu_context(%ld)\n",
5115                          (long)sizeof(union cdu_context));
5116
5117         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5118         val = (4 << 24) + (0 << 12) + 1024;
5119         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5120
5121         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5122         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5123         /* enable context validation interrupt from CFC */
5124         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5125
5126         /* set the thresholds to prevent CFC/CDU race */
5127         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5128
5129         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5130
5131         if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5132                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5133
5134         bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5135         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5136
5137         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5138         /* Reset PCIE errors for debug */
5139         REG_WR(bp, 0x2814, 0xffffffff);
5140         REG_WR(bp, 0x3820, 0xffffffff);
5141
5142         if (CHIP_IS_E2(bp)) {
5143                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5144                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5145                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5146                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5147                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5148                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5149                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5150                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5151                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5152                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5153                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5154         }
5155
5156         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5157         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5158         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5159         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5160
5161         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5162         if (!CHIP_IS_E1(bp)) {
5163                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5164                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
5165         }
5166         if (CHIP_IS_E2(bp)) {
5167                 /* Bit-map indicating which L2 hdrs may appear after the
5168                    basic Ethernet header */
5169                 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5170         }
5171
5172         if (CHIP_REV_IS_SLOW(bp))
5173                 msleep(200);
5174
5175         /* finish CFC init */
5176         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5177         if (val != 1) {
5178                 BNX2X_ERR("CFC LL_INIT failed\n");
5179                 return -EBUSY;
5180         }
5181         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5182         if (val != 1) {
5183                 BNX2X_ERR("CFC AC_INIT failed\n");
5184                 return -EBUSY;
5185         }
5186         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5187         if (val != 1) {
5188                 BNX2X_ERR("CFC CAM_INIT failed\n");
5189                 return -EBUSY;
5190         }
5191         REG_WR(bp, CFC_REG_DEBUG0, 0);
5192
5193                 /* read the NIG statistic
5194                    to see if this is our first time up since power-up */
5195                    to see if this is our first up since powerup */
5196                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5197                 val = *bnx2x_sp(bp, wb_data[0]);
5198
5199                 /* do internal memory self test */
5200                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5201                         BNX2X_ERR("internal mem self test failed\n");
5202                         return -EBUSY;
5203                 }
5204         }
5205
5206         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5207                                                        bp->common.shmem_base,
5208                                                        bp->common.shmem2_base);
5209
5210         bnx2x_setup_fan_failure_detection(bp);
5211
5212         /* clear PXP2 attentions */
5213         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5214
5215         enable_blocks_attention(bp);
5216         if (CHIP_PARITY_SUPPORTED(bp))
5217                 enable_blocks_parity(bp);
5218
5219         if (!BP_NOMCP(bp)) {
5220                 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5221                 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5222                     CHIP_IS_E1x(bp)) {
5223                         u32 shmem_base[2], shmem2_base[2];
5224                         shmem_base[0] =  bp->common.shmem_base;
5225                         shmem2_base[0] = bp->common.shmem2_base;
5226                         if (CHIP_IS_E2(bp)) {
5227                                 shmem_base[1] =
5228                                         SHMEM2_RD(bp, other_shmem_base_addr);
5229                                 shmem2_base[1] =
5230                                         SHMEM2_RD(bp, other_shmem2_base_addr);
5231                         }
5232                         bnx2x_acquire_phy_lock(bp);
5233                         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5234                                               bp->common.chip_id);
5235                         bnx2x_release_phy_lock(bp);
5236                 }
5237         } else
5238                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5239
5240         return 0;
5241 }
5242
5243 static int bnx2x_init_hw_port(struct bnx2x *bp)
5244 {
5245         int port = BP_PORT(bp);
5246         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5247         u32 low, high;
5248         u32 val;
5249
5250         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
5251
5252         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5253
5254         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5255         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5256
5257         /* Timers bug workaround: the common phase disables the pf_master
5258          * bit in pglue, so we need to enable it here before any DMAE
5259          * accesses are attempted. Therefore we manually add enable-master
5260          * in the port phase (it also happens in the function phase).
5261          */
5262         if (CHIP_IS_E2(bp))
5263                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5264
5265         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5266         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5267         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5268         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5269
5270         /* QM cid (connection) count */
5271         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5272
5273 #ifdef BCM_CNIC
5274         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5275         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5276         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5277 #endif
5278
5279         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5280
5281         if (CHIP_MODE_IS_4_PORT(bp))
5282                 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5283
5284         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5285                 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5286                 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5287                         /* no pause for emulation and FPGA */
5288                         low = 0;
5289                         high = 513;
5290                 } else {
5291                         if (IS_MF(bp))
5292                                 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5293                         else if (bp->dev->mtu > 4096) {
5294                                 if (bp->flags & ONE_PORT_FLAG)
5295                                         low = 160;
5296                                 else {
5297                                         val = bp->dev->mtu;
5298                                         /* (24*1024 + val*4)/256 */
5299                                         low = 96 + (val/64) +
5300                                                         ((val % 64) ? 1 : 0);
5301                                 }
5302                         } else
5303                                 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5304                         high = low + 56;        /* 14*1024/256 */
5305                 }
5306                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5307                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5308         }
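        /*
         * Worked example for the formula above (sketch): low is
         * (24*1024 + mtu*4)/256 rounded up, in 256-byte units; for
         * mtu = 9000 that is 96 + 9000/64 + 1 = 237, with
         * high = 237 + 56 = 293.
         */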
5309
5310         if (CHIP_MODE_IS_4_PORT(bp)) {
5311                 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5312                 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5313                 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5314                                           BRB1_REG_MAC_GUARANTIED_0), 40);
5315         }
5316
5317         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5318
5319         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5320         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5321         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5322         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5323
5324         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5325         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5326         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5327         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5328         if (CHIP_MODE_IS_4_PORT(bp))
5329                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5330
5331         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5332         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5333
5334         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5335
5336         if (!CHIP_IS_E2(bp)) {
5337                 /* configure PBF to work without PAUSE mtu 9000 */
5338                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5339
5340                 /* update threshold */
5341                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5342                 /* update init credit */
5343                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5344
5345                 /* probe changes */
5346                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5347                 udelay(50);
5348                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5349         }
5350
5351 #ifdef BCM_CNIC
5352         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5353 #endif
5354         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5355         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5356
5357         if (CHIP_IS_E1(bp)) {
5358                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5359                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5360         }
5361         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5362
5363         bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5364
5365         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5366         /* init aeu_mask_attn_func_0/1:
5367          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5368          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5369          *             bits 4-7 are used for "per vn group attention" */
5370         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5371                (IS_MF(bp) ? 0xF7 : 0x7));
5372
5373         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5374         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5375         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5376         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5377         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5378
5379         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5380
5381         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5382
5383         if (!CHIP_IS_E1(bp)) {
5384                 /* 0x2 disable mf_ov, 0x1 enable */
5385                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5386                        (IS_MF(bp) ? 0x1 : 0x2));
5387
5388                 if (CHIP_IS_E2(bp)) {
5389                         val = 0;
5390                         switch (bp->mf_mode) {
5391                         case MULTI_FUNCTION_SD:
5392                                 val = 1;
5393                                 break;
5394                         case MULTI_FUNCTION_SI:
5395                                 val = 2;
5396                                 break;
5397                         }
5398
5399                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5400                                                   NIG_REG_LLH0_CLS_TYPE), val);
5401                 }
5402                 {
5403                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5404                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5405                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5406                 }
5407         }
5408
5409         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5410         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5411         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5412                                                        bp->common.shmem_base,
5413                                                        bp->common.shmem2_base);
5414         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5415                                       bp->common.shmem2_base, port)) {
5416                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5417                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5418                 val = REG_RD(bp, reg_addr);
5419                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5420                 REG_WR(bp, reg_addr, val);
5421         }
5422         bnx2x__link_reset(bp);
5423
5424         return 0;
5425 }
5426
5427 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5428 {
5429         int reg;
5430
5431         if (CHIP_IS_E1(bp))
5432                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5433         else
5434                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5435
5436         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5437 }
5438
5439 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5440 {
5441         bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5442 }
5443
5444 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5445 {
5446         u32 i, base = FUNC_ILT_BASE(func);
5447         for (i = base; i < base + ILT_PER_FUNC; i++)
5448                 bnx2x_ilt_wr(bp, i, 0);
5449 }
5450
5451 static int bnx2x_init_hw_func(struct bnx2x *bp)
5452 {
5453         int port = BP_PORT(bp);
5454         int func = BP_FUNC(bp);
5455         struct bnx2x_ilt *ilt = BP_ILT(bp);
5456         u16 cdu_ilt_start;
5457         u32 addr, val;
5458         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5459         int i, main_mem_width;
5460
5461         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
5462
5463         /* set MSI reconfigure capability */
5464         if (bp->common.int_block == INT_BLOCK_HC) {
5465                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5466                 val = REG_RD(bp, addr);
5467                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5468                 REG_WR(bp, addr, val);
5469         }
5470
5471         ilt = BP_ILT(bp);
5472         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5473
5474         for (i = 0; i < L2_ILT_LINES(bp); i++) {
5475                 ilt->lines[cdu_ilt_start + i].page =
5476                         bp->context.vcxt + (ILT_PAGE_CIDS * i);
5477                 ilt->lines[cdu_ilt_start + i].page_mapping =
5478                         bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5479                 /* cdu ilt pages are allocated manually so there's no
5480                  * need to set the size */
5481         }
5482         bnx2x_ilt_init_op(bp, INITOP_SET);
5483
5484 #ifdef BCM_CNIC
5485         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5486
5487         /* T1 hash bits value determines the T1 number of entries */
5488         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5489 #endif
5490
5491 #ifndef BCM_CNIC
5492         /* set NIC mode */
5493         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5494 #endif  /* BCM_CNIC */
5495
5496         if (CHIP_IS_E2(bp)) {
5497                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5498
5499                 /* Turn on a single ISR mode in IGU if driver is going to use
5500                  * INT#x or MSI
5501                  */
5502                 if (!(bp->flags & USING_MSIX_FLAG))
5503                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5504                 /*
5505                  * Timers bug workaround: function init part.
5506                  * We need to wait 20msec after initializing the ILT to
5507                  * make sure there are no requests left in any of the PXP
5508                  * internal queues with "old" ILT addresses.
5509                  */
5510                 msleep(20);
5511                 /*
5512                  * Master enable - must be set again because WB DMAE
5513                  * writes were performed before this register is
5514                  * re-initialized as part of the regular function init
5515                  */
5516                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5517                 /* Enable the function in IGU */
5518                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5519         }
5520
5521         bp->dmae_ready = 1;
5522
5523         bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5524
5525         if (CHIP_IS_E2(bp))
5526                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5527
5528         bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5529         bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5530         bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5531         bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5532         bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5533         bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5534         bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5535         bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5536         bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5537
5538         if (CHIP_IS_E2(bp)) {
5539                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5540                                                                 BP_PATH(bp));
5541                 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5542                                                                 BP_PATH(bp));
5543         }
5544
5545         if (CHIP_MODE_IS_4_PORT(bp))
5546                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5547
5548         if (CHIP_IS_E2(bp))
5549                 REG_WR(bp, QM_REG_PF_EN, 1);
5550
5551         bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5552
5553         if (CHIP_MODE_IS_4_PORT(bp))
5554                 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5555
5556         bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5557         bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5558         bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5559         bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5560         bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5561         bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5562         bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5563         bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5564         bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5565         bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5566         bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5567         if (CHIP_IS_E2(bp))
5568                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5569
5570         bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5571
5572         bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5573
5574         if (CHIP_IS_E2(bp))
5575                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5576
5577         if (IS_MF(bp)) {
5578                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5579                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5580         }
5581
5582         bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5583
5584         /* HC init per function */
5585         if (bp->common.int_block == INT_BLOCK_HC) {
5586                 if (CHIP_IS_E1H(bp)) {
5587                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5588
5589                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5590                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5591                 }
5592                 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5593
5594         } else {
5595                 int num_segs, sb_idx, prod_offset;
5596
5597                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5598
5599                 if (CHIP_IS_E2(bp)) {
5600                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5601                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5602                 }
5603
5604                 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5605
5606                 if (CHIP_IS_E2(bp)) {
5607                         int dsb_idx = 0;
5608                         /*
5609                          * Producer memory:
5610                          * E2 mode: addresses 0-135 map to the mapping memory;
5611                          * 136 - PF0 default prod; 137 - PF1 default prod;
5612                          * 138 - PF2 default prod; 139 - PF3 default prod;
5613                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
5614                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
5615                          * 144-147 reserved.
5616                          *
5617                          * E1.5 mode - in backward compatible mode:
5618                          * for non-default SBs, each even line in the memory
5619                          * holds the U producer and each odd line holds
5620                          * the C producer. The first 128 producers are for
5621                          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5622                          * producers are for the DSB of each PF.
5623                          * Each PF has five segments (the order inside each
5624                          * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
5625                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5626                          * 144-147 attn prods.
5627                          */
5628                         /* non-default-status-blocks */
5629                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5630                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5631                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5632                                 prod_offset = (bp->igu_base_sb + sb_idx) *
5633                                         num_segs;
5634
5635                                 for (i = 0; i < num_segs; i++) {
5636                                         addr = IGU_REG_PROD_CONS_MEMORY +
5637                                                         (prod_offset + i) * 4;
5638                                         REG_WR(bp, addr, 0);
5639                                 }
5640                                 /* send consumer update with value 0 */
5641                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5642                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5643                                 bnx2x_igu_clear_sb(bp,
5644                                                    bp->igu_base_sb + sb_idx);
5645                         }
5646
5647                         /* default-status-blocks */
5648                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5649                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5650
5651                         if (CHIP_MODE_IS_4_PORT(bp))
5652                                 dsb_idx = BP_FUNC(bp);
5653                         else
5654                                 dsb_idx = BP_E1HVN(bp);
5655
5656                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5657                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
5658                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
5659
5660                         for (i = 0; i < (num_segs * E1HVN_MAX);
5661                              i += E1HVN_MAX) {
5662                                 addr = IGU_REG_PROD_CONS_MEMORY +
5663                                                         (prod_offset + i)*4;
5664                                 REG_WR(bp, addr, 0);
5665                         }
5666                         /* send consumer update with 0 */
5667                         if (CHIP_INT_MODE_IS_BC(bp)) {
5668                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5669                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5670                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5671                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
5672                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5673                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
5674                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5675                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
5676                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5677                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5678                         } else {
5679                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5680                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5681                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5682                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5683                         }
5684                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5685
5686                         /* !!! these should become driver const once
5687                            rf-tool supports split-68 const */
5688                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5689                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5690                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5691                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5692                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5693                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5694                 }
5695         }
5696
5697         /* Reset PCIE errors for debug */
5698         REG_WR(bp, 0x2114, 0xffffffff);
5699         REG_WR(bp, 0x2120, 0xffffffff);
5700
5701         bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5702         bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5703         bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5704         bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5705         bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5706         bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5707
5708         if (CHIP_IS_E1x(bp)) {
5709                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5710                 main_mem_base = HC_REG_MAIN_MEMORY +
5711                                 BP_PORT(bp) * (main_mem_size * 4);
5712                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5713                 main_mem_width = 8;
5714
5715                 val = REG_RD(bp, main_mem_prty_clr);
5716                 if (val)
5717                         DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5718                                           "block during "
5719                                           "function init (0x%x)!\n", val);
5720
5721                 /* Clear "false" parity errors in MSI-X table */
5722                 for (i = main_mem_base;
5723                      i < main_mem_base + main_mem_size * 4;
5724                      i += main_mem_width) {
5725                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
5726                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5727                                          i, main_mem_width / 4);
5728                 }
5729                 /* Clear HC parity attention */
5730                 REG_RD(bp, main_mem_prty_clr);
5731         }
5732
5733         bnx2x_phy_probe(&bp->link_params);
5734
5735         return 0;
5736 }
5737
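/* Top-level HW init dispatcher. The switch below deliberately falls
 * through: a COMMON load also performs the PORT and FUNCTION stages,
 * and a PORT load also performs the FUNCTION stage.
 */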
5738 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5739 {
5740         int rc = 0;
5741
5742         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5743            BP_ABS_FUNC(bp), load_code);
5744
5745         bp->dmae_ready = 0;
5746         mutex_init(&bp->dmae_mutex);
5747         rc = bnx2x_gunzip_init(bp);
5748         if (rc)
5749                 return rc;
5750
5751         switch (load_code) {
5752         case FW_MSG_CODE_DRV_LOAD_COMMON:
5753         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5754                 rc = bnx2x_init_hw_common(bp, load_code);
5755                 if (rc)
5756                         goto init_hw_err;
5757                 /* no break */
5758
5759         case FW_MSG_CODE_DRV_LOAD_PORT:
5760                 rc = bnx2x_init_hw_port(bp);
5761                 if (rc)
5762                         goto init_hw_err;
5763                 /* no break */
5764
5765         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5766                 rc = bnx2x_init_hw_func(bp);
5767                 if (rc)
5768                         goto init_hw_err;
5769                 break;
5770
5771         default:
5772                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5773                 break;
5774         }
5775
5776         if (!BP_NOMCP(bp)) {
5777                 int mb_idx = BP_FW_MB_IDX(bp);
5778
5779                 bp->fw_drv_pulse_wr_seq =
5780                                 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5781                                  DRV_PULSE_SEQ_MASK);
5782                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5783         }
5784
5785 init_hw_err:
5786         bnx2x_gunzip_end(bp);
5787
5788         return rc;
5789 }
5790
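/* Release everything allocated by bnx2x_alloc_mem(): the per-queue
 * status blocks and rx/tx rings first ("fastpath"), then the default
 * status block, slowpath buffer, CDU context memory, ILT lines, CNIC
 * resources (if built in), and the SPQ and EQ rings.
 */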
5791 void bnx2x_free_mem(struct bnx2x *bp)
5792 {
5793
5794 #define BNX2X_PCI_FREE(x, y, size) \
5795         do { \
5796                 if (x) { \
5797                         dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5798                         x = NULL; \
5799                         y = 0; \
5800                 } \
5801         } while (0)
5802
5803 #define BNX2X_FREE(x) \
5804         do { \
5805                 if (x) { \
5806                         kfree((void *)x); \
5807                         x = NULL; \
5808                 } \
5809         } while (0)
5810
5811         int i;
5812
5813         /* fastpath */
5814         /* Common */
5815         for_each_queue(bp, i) {
5816                 /* status blocks */
5817                 if (CHIP_IS_E2(bp))
5818                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5819                                        bnx2x_fp(bp, i, status_blk_mapping),
5820                                        sizeof(struct host_hc_status_block_e2));
5821                 else
5822                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5823                                        bnx2x_fp(bp, i, status_blk_mapping),
5824                                        sizeof(struct host_hc_status_block_e1x));
5825         }
5826         /* Rx */
5827         for_each_queue(bp, i) {
5828
5829                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5830                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5831                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5832                                bnx2x_fp(bp, i, rx_desc_mapping),
5833                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5834
5835                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5836                                bnx2x_fp(bp, i, rx_comp_mapping),
5837                                sizeof(struct eth_fast_path_rx_cqe) *
5838                                NUM_RCQ_BD);
5839
5840                 /* SGE ring */
5841                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5842                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5843                                bnx2x_fp(bp, i, rx_sge_mapping),
5844                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5845         }
5846         /* Tx */
5847         for_each_queue(bp, i) {
5848
5849                 /* fastpath tx rings: tx_buf tx_desc */
5850                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5851                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5852                                bnx2x_fp(bp, i, tx_desc_mapping),
5853                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5854         }
5855         /* end of fastpath */
5856
5857         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5858                        sizeof(struct host_sp_status_block));
5859
5860         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5861                        sizeof(struct bnx2x_slowpath));
5862
5863         BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5864                        bp->context.size);
5865
5866         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5867
5868         BNX2X_FREE(bp->ilt->lines);
5869
5870 #ifdef BCM_CNIC
5871         if (CHIP_IS_E2(bp))
5872                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5873                                sizeof(struct host_hc_status_block_e2));
5874         else
5875                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5876                                sizeof(struct host_hc_status_block_e1x));
5877
5878         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5879 #endif
5880
5881         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5882
5883         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5884                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
5885
5886 #undef BNX2X_PCI_FREE
5887 #undef BNX2X_FREE
5888 }
5889
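/* Cache pointers to the index_values and running_index arrays of the
 * chip-specific status block layout (E2 vs E1x) in the fastpath
 * structure, so later code need not check the chip type on each access.
 */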
5890 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5891 {
5892         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5893         if (CHIP_IS_E2(bp)) {
5894                 bnx2x_fp(bp, index, sb_index_values) =
5895                         (__le16 *)status_blk.e2_sb->sb.index_values;
5896                 bnx2x_fp(bp, index, sb_running_index) =
5897                         (__le16 *)status_blk.e2_sb->sb.running_index;
5898         } else {
5899                 bnx2x_fp(bp, index, sb_index_values) =
5900                         (__le16 *)status_blk.e1x_sb->sb.index_values;
5901                 bnx2x_fp(bp, index, sb_running_index) =
5902                         (__le16 *)status_blk.e1x_sb->sb.running_index;
5903         }
5904 }
5905
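/* Allocate all memory needed to run the device. On any failure,
 * everything allocated so far is released via bnx2x_free_mem() and
 * -ENOMEM is returned.
 */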
5906 int bnx2x_alloc_mem(struct bnx2x *bp)
5907 {
5908 #define BNX2X_PCI_ALLOC(x, y, size) \
5909         do { \
5910                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5911                 if (x == NULL) \
5912                         goto alloc_mem_err; \
5913                 memset(x, 0, size); \
5914         } while (0)
5915
5916 #define BNX2X_ALLOC(x, size) \
5917         do { \
5918                 x = kzalloc(size, GFP_KERNEL); \
5919                 if (x == NULL) \
5920                         goto alloc_mem_err; \
5921         } while (0)
5922
5923         int i;
5924
5925         /* fastpath */
5926         /* Common */
5927         for_each_queue(bp, i) {
5928                 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5929                 bnx2x_fp(bp, i, bp) = bp;
5930                 /* status blocks */
5931                 if (CHIP_IS_E2(bp))
5932                         BNX2X_PCI_ALLOC(sb->e2_sb,
5933                                 &bnx2x_fp(bp, i, status_blk_mapping),
5934                                 sizeof(struct host_hc_status_block_e2));
5935                 else
5936                         BNX2X_PCI_ALLOC(sb->e1x_sb,
5937                                 &bnx2x_fp(bp, i, status_blk_mapping),
5938                                 sizeof(struct host_hc_status_block_e1x));
5939
5940                 set_sb_shortcuts(bp, i);
5941         }
5942         /* Rx */
5943         for_each_queue(bp, i) {
5944
5945                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5946                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5947                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5948                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5949                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5950                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5951
5952                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5953                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5954                                 sizeof(struct eth_fast_path_rx_cqe) *
5955                                 NUM_RCQ_BD);
5956
5957                 /* SGE ring */
5958                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5959                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5960                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5961                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5962                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5963         }
5964         /* Tx */
5965         for_each_queue(bp, i) {
5966
5967                 /* fastpath tx rings: tx_buf tx_desc */
5968                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5969                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5970                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5971                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5972                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5973         }
5974         /* end of fastpath */
5975
5976 #ifdef BCM_CNIC
5977         if (CHIP_IS_E2(bp))
5978                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5979                                 sizeof(struct host_hc_status_block_e2));
5980         else
5981                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5982                                 sizeof(struct host_hc_status_block_e1x));
5983
5984         /* allocate searcher T2 table */
5985         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5986 #endif
5987
5988
5989         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5990                         sizeof(struct host_sp_status_block));
5991
5992         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5993                         sizeof(struct bnx2x_slowpath));
5994
5995         bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5996
5997         BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5998                         bp->context.size);
5999
6000         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
6001
6002         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6003                 goto alloc_mem_err;
6004
6005         /* Slow path ring */
6006         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6007
6008         /* EQ */
6009         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6010                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
6011         return 0;
6012
6013 alloc_mem_err:
6014         bnx2x_free_mem(bp);
6015         return -ENOMEM;
6016
6017 #undef BNX2X_PCI_ALLOC
6018 #undef BNX2X_ALLOC
6019 }
6020
6021 /*
6022  * Init service functions
6023  */
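/* Post a FUNCTION_START ramrod and wait until bp->state reflects the
 * completion (BNX2X_STATE_FUNC_STARTED).
 */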
6024 int bnx2x_func_start(struct bnx2x *bp)
6025 {
6026         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
6027
6028         /* Wait for completion */
6029         return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6030                                  WAIT_RAMROD_COMMON);
6031 }
6032
6033 int bnx2x_func_stop(struct bnx2x *bp)
6034 {
6035         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6036
6037         /* Wait for completion */
6038         return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6039                                       0, &(bp->state), WAIT_RAMROD_COMMON);
6040 }
6041
6042 /**
6043  * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6044  *
6045  * @param bp driver descriptor
6046  * @param set set or clear an entry (1 or 0)
6047  * @param mac pointer to a buffer containing a MAC
6048  * @param cl_bit_vec bit vector of clients to register a MAC for
6049  * @param cam_offset offset in a CAM to use
6050  * @param is_bcast is the set MAC a broadcast address (for E1 only)
6051  */
6052 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6053                                    u32 cl_bit_vec, u8 cam_offset,
6054                                    u8 is_bcast)
6055 {
6056         struct mac_configuration_cmd *config =
6057                 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6058         int ramrod_flags = WAIT_RAMROD_COMMON;
6059
6060         bp->set_mac_pending = 1;
6061         smp_wmb();
6062
6063         config->hdr.length = 1;
6064         config->hdr.offset = cam_offset;
6065         config->hdr.client_id = 0xff;
6066         config->hdr.reserved1 = 0;
6067
6068         /* primary MAC */
6069         config->config_table[0].msb_mac_addr =
6070                                         swab16(*(u16 *)&mac[0]);
6071         config->config_table[0].middle_mac_addr =
6072                                         swab16(*(u16 *)&mac[2]);
6073         config->config_table[0].lsb_mac_addr =
6074                                         swab16(*(u16 *)&mac[4]);
6075         config->config_table[0].clients_bit_vector =
6076                                         cpu_to_le32(cl_bit_vec);
6077         config->config_table[0].vlan_id = 0;
6078         config->config_table[0].pf_id = BP_FUNC(bp);
6079         if (set)
6080                 SET_FLAG(config->config_table[0].flags,
6081                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6082                         T_ETH_MAC_COMMAND_SET);
6083         else
6084                 SET_FLAG(config->config_table[0].flags,
6085                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6086                         T_ETH_MAC_COMMAND_INVALIDATE);
6087
6088         if (is_bcast)
6089                 SET_FLAG(config->config_table[0].flags,
6090                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6091
6092         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
6093            (set ? "setting" : "clearing"),
6094            config->config_table[0].msb_mac_addr,
6095            config->config_table[0].middle_mac_addr,
6096            config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6097
6098         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6099                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6100                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6101
6102         /* Wait for a completion */
6103         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6104 }
6105
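/* Wait up to ~5 seconds for *state_p to reach the expected state. In
 * polling mode this context also drives completion processing (the EQ
 * for common ramrods, rx completions otherwise); otherwise the state is
 * advanced by the interrupt-driven bnx2x_sp_event().
 */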
6106 int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6107                       int *state_p, int flags)
6108 {
6109         /* can take a while if any port is running */
6110         int cnt = 5000;
6111         u8 poll = flags & WAIT_RAMROD_POLL;
6112         u8 common = flags & WAIT_RAMROD_COMMON;
6113
6114         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6115            poll ? "polling" : "waiting", state, idx);
6116
6117         might_sleep();
6118         while (cnt--) {
6119                 if (poll) {
6120                         if (common)
6121                                 bnx2x_eq_int(bp);
6122                         else {
6123                                 bnx2x_rx_int(bp->fp, 10);
6124                                 /* if index is different from 0
6125                                  * the reply for some commands will
6126                                  * be on the non default queue
6127                                  */
6128                                 if (idx)
6129                                         bnx2x_rx_int(&bp->fp[idx], 10);
6130                         }
6131                 }
6132
6133                 mb(); /* state is changed by bnx2x_sp_event() */
6134                 if (*state_p == state) {
6135 #ifdef BNX2X_STOP_ON_ERROR
6136                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6137 #endif
6138                         return 0;
6139                 }
6140
6141                 msleep(1);
6142
6143                 if (bp->panic)
6144                         return -EIO;
6145         }
6146
6147         /* timeout! */
6148         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6149                   poll ? "polling" : "waiting", state, idx);
6150 #ifdef BNX2X_STOP_ON_ERROR
6151         bnx2x_panic();
6152 #endif
6153
6154         return -EBUSY;
6155 }
6156
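/* Compute a function's CAM offset for a relative CAM line: E1H
 * interleaves the functions within each line, while later chips use
 * 32-entry windows per function (4-port mode) or per vn.
 */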
6157 u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6158 {
6159         if (CHIP_IS_E1H(bp))
6160                 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6161         else if (CHIP_MODE_IS_4_PORT(bp))
6162                 return BP_FUNC(bp) * 32  + rel_offset;
6163         else
6164                 return BP_VN(bp) * 32  + rel_offset;
6165 }
6166
6167 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6168 {
6169         u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6170                          bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6171
6172         /* networking  MAC */
6173         bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6174                                (1 << bp->fp->cl_id), cam_offset, 0);
6175
6176         if (CHIP_IS_E1(bp)) {
6177                 /* broadcast MAC */
6178                 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6179                 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6180         }
6181 }
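
/* Build the E1 multicast CAM table from the net_device's mc list and
 * post a SET_MAC ramrod. Entries left over from a previously longer
 * list are explicitly invalidated.
 */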
6182 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6183 {
6184         int i = 0, old;
6185         struct net_device *dev = bp->dev;
6186         struct netdev_hw_addr *ha;
6187         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6188         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6189
6190         netdev_for_each_mc_addr(ha, dev) {
6191                 /* copy mac */
6192                 config_cmd->config_table[i].msb_mac_addr =
6193                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6194                 config_cmd->config_table[i].middle_mac_addr =
6195                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6196                 config_cmd->config_table[i].lsb_mac_addr =
6197                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6198
6199                 config_cmd->config_table[i].vlan_id = 0;
6200                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6201                 config_cmd->config_table[i].clients_bit_vector =
6202                         cpu_to_le32(1 << BP_L_ID(bp));
6203
6204                 SET_FLAG(config_cmd->config_table[i].flags,
6205                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6206                         T_ETH_MAC_COMMAND_SET);
6207
6208                 DP(NETIF_MSG_IFUP,
6209                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6210                    config_cmd->config_table[i].msb_mac_addr,
6211                    config_cmd->config_table[i].middle_mac_addr,
6212                    config_cmd->config_table[i].lsb_mac_addr);
6213                 i++;
6214         }
6215         old = config_cmd->hdr.length;
6216         if (old > i) {
6217                 for (; i < old; i++) {
6218                         if (CAM_IS_INVALID(config_cmd->
6219                                            config_table[i])) {
6220                                 /* already invalidated */
6221                                 break;
6222                         }
6223                         /* invalidate */
6224                         SET_FLAG(config_cmd->config_table[i].flags,
6225                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6226                                 T_ETH_MAC_COMMAND_INVALIDATE);
6227                 }
6228         }
6229
6230         config_cmd->hdr.length = i;
6231         config_cmd->hdr.offset = offset;
6232         config_cmd->hdr.client_id = 0xff;
6233         config_cmd->hdr.reserved1 = 0;
6234
6235         bp->set_mac_pending = 1;
6236         smp_wmb();
6237
6238         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6239                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6240 }
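
/* Invalidate every entry of the E1 multicast CAM table and wait for the
 * SET_MAC ramrod to complete.
 */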
6241 static void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6242 {
6243         int i;
6244         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6245         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6246         int ramrod_flags = WAIT_RAMROD_COMMON;
6247
6248         bp->set_mac_pending = 1;
6249         smp_wmb();
6250
6251         for (i = 0; i < config_cmd->hdr.length; i++)
6252                 SET_FLAG(config_cmd->config_table[i].flags,
6253                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6254                         T_ETH_MAC_COMMAND_INVALIDATE);
6255
6256         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6257                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6258
6259         /* Wait for a completion */
6260         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6261                                 ramrod_flags);
6262
6263 }
6264
6265 #ifdef BCM_CNIC
6266 /**
6267  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6268  * MAC(s). This function will wait until the ramrod completion
6269  * returns.
6270  *
6271  * @param bp driver handle
6272  * @param set set or clear the CAM entry
6273  *
6274  * @return 0 if success, -ENODEV if the ramrod doesn't return.
6275  */
6276 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6277 {
6278         u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6279                          bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6280         u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6281         u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6282
6283         /* Send a SET_MAC ramrod */
6284         bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6285                                cam_offset, 0);
6286         return 0;
6287 }
6288 #endif
6289
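/* Translate the driver's queue setup (rx/tx queue parameters, pause
 * thresholds, flags) into the client_init_ramrod_data layout that the
 * firmware consumes.
 */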
6290 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6291                                     struct bnx2x_client_init_params *params,
6292                                     u8 activate,
6293                                     struct client_init_ramrod_data *data)
6294 {
6295         /* Clear the buffer */
6296         memset(data, 0, sizeof(*data));
6297
6298         /* general */
6299         data->general.client_id = params->rxq_params.cl_id;
6300         data->general.statistics_counter_id = params->rxq_params.stat_id;
6301         data->general.statistics_en_flg =
6302                 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6303         data->general.activate_flg = activate;
6304         data->general.sp_client_id = params->rxq_params.spcl_id;
6305
6306         /* Rx data */
6307         data->rx.tpa_en_flg =
6308                 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6309         data->rx.vmqueue_mode_en_flg = 0;
6310         data->rx.cache_line_alignment_log_size =
6311                 params->rxq_params.cache_line_log;
6312         data->rx.enable_dynamic_hc =
6313                 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6314         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6315         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6316         data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6317
6318         /* We don't set drop flags */
6319         data->rx.drop_ip_cs_err_flg = 0;
6320         data->rx.drop_tcp_cs_err_flg = 0;
6321         data->rx.drop_ttl0_flg = 0;
6322         data->rx.drop_udp_cs_err_flg = 0;
6323
6324         data->rx.inner_vlan_removal_enable_flg =
6325                 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6326         data->rx.outer_vlan_removal_enable_flg =
6327                 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6328         data->rx.status_block_id = params->rxq_params.fw_sb_id;
6329         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6330         data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6331         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6332         data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6333         data->rx.bd_page_base.lo =
6334                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6335         data->rx.bd_page_base.hi =
6336                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6337         data->rx.sge_page_base.lo =
6338                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6339         data->rx.sge_page_base.hi =
6340                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6341         data->rx.cqe_page_base.lo =
6342                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6343         data->rx.cqe_page_base.hi =
6344                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6345         data->rx.is_leading_rss =
6346                 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6347         data->rx.is_approx_mcast = data->rx.is_leading_rss;
6348
6349         /* Tx data */
6350         data->tx.enforce_security_flg = 0; /* VF specific */
6351         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6352         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6353         data->tx.mtu = 0; /* VF specific */
6354         data->tx.tx_bd_page_base.lo =
6355                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6356         data->tx.tx_bd_page_base.hi =
6357                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6358
6359         /* flow control data */
6360         data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6361         data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6362         data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6363         data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6364         data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6365         data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6366         data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6367
6368         data->fc.safc_group_num = params->txq_params.cos;
6369         data->fc.safc_group_en_flg =
6370                 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6371         data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6372 }
6373
6374 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6375 {
6376         /* ustorm cxt validation */
6377         cxt->ustorm_ag_context.cdu_usage =
6378                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6379                                        ETH_CONNECTION_TYPE);
6380         /* xcontext validation */
6381         cxt->xstorm_ag_context.cdu_reserved =
6382                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6383                                        ETH_CONNECTION_TYPE);
6384 }
6385
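/* Bring up an L2 client in FW: program interrupt coalescing for the tx
 * and rx SB indices, stamp the CDU context validation words, zero the
 * client's statistics, then post a CLIENT_SETUP ramrod and wait for its
 * completion.
 */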
6386 int bnx2x_setup_fw_client(struct bnx2x *bp,
6387                           struct bnx2x_client_init_params *params,
6388                           u8 activate,
6389                           struct client_init_ramrod_data *data,
6390                           dma_addr_t data_mapping)
6391 {
6392         u16 hc_usec;
6393         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6394         int ramrod_flags = 0, rc;
6395
6396         /* HC and context validation values */
6397         hc_usec = params->txq_params.hc_rate ?
6398                 1000000 / params->txq_params.hc_rate : 0;
6399         bnx2x_update_coalesce_sb_index(bp,
6400                         params->txq_params.fw_sb_id,
6401                         params->txq_params.sb_cq_index,
6402                         !(params->txq_params.flags & QUEUE_FLG_HC),
6403                         hc_usec);
6404
6405         *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6406
6407         hc_usec = params->rxq_params.hc_rate ?
6408                 1000000 / params->rxq_params.hc_rate : 0;
6409         bnx2x_update_coalesce_sb_index(bp,
6410                         params->rxq_params.fw_sb_id,
6411                         params->rxq_params.sb_cq_index,
6412                         !(params->rxq_params.flags & QUEUE_FLG_HC),
6413                         hc_usec);
6414
6415         bnx2x_set_ctx_validation(params->rxq_params.cxt,
6416                                  params->rxq_params.cid);
6417
6418         /* zero stats */
6419         if (params->txq_params.flags & QUEUE_FLG_STATS)
6420                 storm_memset_xstats_zero(bp, BP_PORT(bp),
6421                                          params->txq_params.stat_id);
6422
6423         if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6424                 storm_memset_ustats_zero(bp, BP_PORT(bp),
6425                                          params->rxq_params.stat_id);
6426                 storm_memset_tstats_zero(bp, BP_PORT(bp),
6427                                          params->rxq_params.stat_id);
6428         }
6429
6430         /* Fill the ramrod data */
6431         bnx2x_fill_cl_init_data(bp, params, activate, data);
6432
6433         /* SETUP ramrod.
6434          *
6435          * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6436          * barrier other than mmiowb() is needed to impose a
6437          * proper ordering of memory operations.
6438          */
6439         mmiowb();
6440
6441
6442         bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6443                       U64_HI(data_mapping), U64_LO(data_mapping), 0);
6444
6445         /* Wait for completion */
6446         rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6447                                  params->ramrod_params.index,
6448                                  params->ramrod_params.pstate,
6449                                  ramrod_flags);
6450         return rc;
6451 }
6452
6453 /**
6454  * Configure interrupt mode according to current configuration.
6455  * In case of MSI-X it will also try to enable MSI-X.
6456  *
6457  * @param bp
6458  *
6459  * @return int
6460  */
6461 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6462 {
6463         int rc = 0;
6464
6465         switch (bp->int_mode) {
6466         case INT_MODE_MSI:
6467                 bnx2x_enable_msi(bp);
6468                 /* falling through... */
6469         case INT_MODE_INTx:
6470                 bp->num_queues = 1;
6471                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6472                 break;
6473         default:
6474                 /* Set number of queues according to bp->multi_mode value */
6475                 bnx2x_set_num_queues(bp);
6476
6477                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6478                    bp->num_queues);
6479
6480                 /* if we can't use MSI-X we only need one fp,
6481                  * so try to enable MSI-X with the requested number of fp's
6482                  * and fallback to MSI or legacy INTx with one fp
6483                  */
6484                 rc = bnx2x_enable_msix(bp);
6485                 if (rc) {
6486                         /* failed to enable MSI-X */
6487                         if (bp->multi_mode)
6488                                 DP(NETIF_MSG_IFUP,
6489                                    "Multi requested but failed to "
6490                                    "enable MSI-X (%d queues requested), "
6491                                    "set number of queues to %d\n",
6492                                    bp->num_queues, 1);
6494                         bp->num_queues = 1;
6495
6496                         if (!(bp->flags & DISABLE_MSI_FLAG))
6497                                 bnx2x_enable_msi(bp);
6498                 }
6499
6500                 break;
6501         }
6502
6503         return rc;
6504 }
6505
6506 /* must be called prior to any HW initializations */
6507 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6508 {
6509         return L2_ILT_LINES(bp);
6510 }
6511
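/* Carve this function's ILT range between its clients: CDU (L2 and,
 * when CNIC is built in, iSCSI contexts), QM, and the CNIC-only SRC and
 * TM clients. Each client gets a contiguous [start, end] line range.
 */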
6512 void bnx2x_ilt_set_info(struct bnx2x *bp)
6513 {
6514         struct ilt_client_info *ilt_client;
6515         struct bnx2x_ilt *ilt = BP_ILT(bp);
6516         u16 line = 0;
6517
6518         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6519         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6520
6521         /* CDU */
6522         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6523         ilt_client->client_num = ILT_CLIENT_CDU;
6524         ilt_client->page_size = CDU_ILT_PAGE_SZ;
6525         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6526         ilt_client->start = line;
6527         line += L2_ILT_LINES(bp);
6528 #ifdef BCM_CNIC
6529         line += CNIC_ILT_LINES;
6530 #endif
6531         ilt_client->end = line - 1;
6532
6533         DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6534                                          "flags 0x%x, hw psz %d\n",
6535            ilt_client->start,
6536            ilt_client->end,
6537            ilt_client->page_size,
6538            ilt_client->flags,
6539            ilog2(ilt_client->page_size >> 12));
6540
6541         /* QM */
6542         if (QM_INIT(bp->qm_cid_count)) {
6543                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6544                 ilt_client->client_num = ILT_CLIENT_QM;
6545                 ilt_client->page_size = QM_ILT_PAGE_SZ;
6546                 ilt_client->flags = 0;
6547                 ilt_client->start = line;
6548
6549                 /* 4 bytes for each cid */
6550                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6551                                                          QM_ILT_PAGE_SZ);
6552
6553                 ilt_client->end = line - 1;
6554
6555                 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6556                                                  "flags 0x%x, hw psz %d\n",
6557                    ilt_client->start,
6558                    ilt_client->end,
6559                    ilt_client->page_size,
6560                    ilt_client->flags,
6561                    ilog2(ilt_client->page_size >> 12));
6562
6563         }
6564         /* SRC */
6565         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6566 #ifdef BCM_CNIC
6567         ilt_client->client_num = ILT_CLIENT_SRC;
6568         ilt_client->page_size = SRC_ILT_PAGE_SZ;
6569         ilt_client->flags = 0;
6570         ilt_client->start = line;
6571         line += SRC_ILT_LINES;
6572         ilt_client->end = line - 1;
6573
6574         DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6575                                          "flags 0x%x, hw psz %d\n",
6576            ilt_client->start,
6577            ilt_client->end,
6578            ilt_client->page_size,
6579            ilt_client->flags,
6580            ilog2(ilt_client->page_size >> 12));
6581
6582 #else
6583         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6584 #endif
6585
6586         /* TM */
6587         ilt_client = &ilt->clients[ILT_CLIENT_TM];
6588 #ifdef BCM_CNIC
6589         ilt_client->client_num = ILT_CLIENT_TM;
6590         ilt_client->page_size = TM_ILT_PAGE_SZ;
6591         ilt_client->flags = 0;
6592         ilt_client->start = line;
6593         line += TM_ILT_LINES;
6594         ilt_client->end = line - 1;
6595
6596         DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6597                                          "flags 0x%x, hw psz %d\n",
6598            ilt_client->start,
6599            ilt_client->end,
6600            ilt_client->page_size,
6601            ilt_client->flags,
6602            ilog2(ilt_client->page_size >> 12));
6603
6604 #else
6605         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6606 #endif
6607 }
6608
6609 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6610                        int is_leading)
6611 {
6612         struct bnx2x_client_init_params params = { {0} };
6613         int rc;
6614
6615         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6616                              IGU_INT_ENABLE, 0);
6617
6618         params.ramrod_params.pstate = &fp->state;
6619         params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6620         params.ramrod_params.index = fp->index;
6621         params.ramrod_params.cid = fp->cid;
6622
6623         if (is_leading)
6624                 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6625
6626         bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6627
6628         bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6629
6630         rc = bnx2x_setup_fw_client(bp, &params, 1,
6631                                      bnx2x_sp(bp, client_init_data),
6632                                      bnx2x_sp_mapping(bp, client_init_data));
6633         return rc;
6634 }
6635
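/* Tear down an L2 client in FW with a three-step sequence, waiting for
 * each ramrod's completion: HALT the connection, TERMINATE it, then
 * delete its CFC entry.
 */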
6636 int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6637 {
6638         int rc;
6639
6640         int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6641
6642         /* halt the connection */
6643         *p->pstate = BNX2X_FP_STATE_HALTING;
6644         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6645                                                   p->cl_id, 0);
6646
6647         /* Wait for completion */
6648         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6649                                p->pstate, poll_flag);
6650         if (rc) /* timeout */
6651                 return rc;
6652
6653         *p->pstate = BNX2X_FP_STATE_TERMINATING;
6654         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6655                                                        p->cl_id, 0);
6656         /* Wait for completion */
6657         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6658                                p->pstate, poll_flag);
6659         if (rc) /* timeout */
6660                 return rc;
6661
6662
6663         /* delete cfc entry */
6664         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6665
6666         /* Wait for completion */
6667         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6668                                p->pstate, WAIT_RAMROD_COMMON);
6669         return rc;
6670 }
6671
6672 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6673 {
6674         struct bnx2x_client_ramrod_params client_stop = {0};
6675         struct bnx2x_fastpath *fp = &bp->fp[index];
6676
6677         client_stop.index = index;
6678         client_stop.cid = fp->cid;
6679         client_stop.cl_id = fp->cl_id;
6680         client_stop.pstate = &(fp->state);
6681         client_stop.poll = 0;
6682
6683         return bnx2x_stop_fw_client(bp, &client_stop);
6684 }
6685
6686
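/* Undo bnx2x_init_hw_func(): mark the function disabled in each storm
 * and in every status block, quiesce the HC/IGU edges, stop the CNIC
 * timer scan, clear the function's ILT lines and (on E2) disable the
 * PF. DMAE must not be used afterwards (bp->dmae_ready = 0).
 */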
6687 static void bnx2x_reset_func(struct bnx2x *bp)
6688 {
6689         int port = BP_PORT(bp);
6690         int func = BP_FUNC(bp);
6691         int i;
6692         int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6693                         (CHIP_IS_E2(bp) ?
6694                          offsetof(struct hc_status_block_data_e2, common) :
6695                          offsetof(struct hc_status_block_data_e1x, common));
6696         int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6697         int pfid_offset = offsetof(struct pci_entity, pf_id);
6698
6699         /* Disable the function in the FW */
6700         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6701         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6702         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6703         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6704
6705         /* FP SBs */
6706         for_each_queue(bp, i) {
6707                 struct bnx2x_fastpath *fp = &bp->fp[i];
6708                 REG_WR8(bp,
6709                         BAR_CSTRORM_INTMEM +
6710                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6711                         + pfunc_offset_fp + pfid_offset,
6712                         HC_FUNCTION_DISABLED);
6713         }
6714
6715         /* SP SB */
6716         REG_WR8(bp,
6717                 BAR_CSTRORM_INTMEM +
6718                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6719                 pfunc_offset_sp + pfid_offset,
6720                 HC_FUNCTION_DISABLED);
6721
6722
6723         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6724                 REG_WR(bp, BAR_XSTRORM_INTMEM +
6725                        XSTORM_SPQ_DATA_OFFSET(func) + i * 4, 0);
6726
6727         /* Configure IGU */
6728         if (bp->common.int_block == INT_BLOCK_HC) {
6729                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6730                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6731         } else {
6732                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6733                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6734         }
6735
6736 #ifdef BCM_CNIC
6737         /* Disable Timer scan */
6738         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6739         /*
6740          * Wait for at least 10ms and up to 2 seconds for the timers scan to
6741          * complete
6742          */
6743         for (i = 0; i < 200; i++) {
6744                 msleep(10);
6745                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6746                         break;
6747         }
6748 #endif
6749         /* Clear ILT */
6750         bnx2x_clear_func_ilt(bp, func);
6751
6752         /* Timers bug workaround for E2: if this is vnic-3,
6753          * we need to set the entire ILT range for the timers.
6754          */
6755         if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6756                 struct ilt_client_info ilt_cli;
6757                 /* use dummy TM client */
6758                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6759                 ilt_cli.start = 0;
6760                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6761                 ilt_cli.client_num = ILT_CLIENT_TM;
6762
6763                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6764         }
6765
6766         /* this assumes that reset_port() was called before reset_func() */
6767         if (CHIP_IS_E2(bp))
6768                 bnx2x_pf_disable(bp);
6769
6770         bp->dmae_ready = 0;
6771 }
6772
6773 static void bnx2x_reset_port(struct bnx2x *bp)
6774 {
6775         int port = BP_PORT(bp);
6776         u32 val;
6777
6778         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6779
6780         /* Do not rcv packets to BRB */
6781         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6782         /* Do not direct rcv packets that are not for MCP to the BRB */
6783         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6784                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6785
6786         /* Configure AEU */
6787         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6788
6789         msleep(100);
6790         /* Check for BRB port occupancy */
6791         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6792         if (val)
6793                 DP(NETIF_MSG_IFDOWN,
6794                    "BRB1 is not empty  %d blocks are occupied\n", val);
6795
6796         /* TODO: Close Doorbell port? */
6797 }
6798
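/* Reset HW at the scope requested by the MCP: FUNCTION only, PORT plus
 * FUNCTION, or a full COMMON reset (port, function and common blocks).
 */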
6799 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6800 {
6801         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6802            BP_ABS_FUNC(bp), reset_code);
6803
6804         switch (reset_code) {
6805         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6806                 bnx2x_reset_port(bp);
6807                 bnx2x_reset_func(bp);
6808                 bnx2x_reset_common(bp);
6809                 break;
6810
6811         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6812                 bnx2x_reset_port(bp);
6813                 bnx2x_reset_func(bp);
6814                 break;
6815
6816         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6817                 bnx2x_reset_func(bp);
6818                 break;
6819
6820         default:
6821                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6822                 break;
6823         }
6824 }
6825
6826 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6827 {
6828         int port = BP_PORT(bp);
6829         u32 reset_code = 0;
6830         int i, cnt, rc;
6831
6832         /* Wait until tx fastpath tasks complete */
6833         for_each_queue(bp, i) {
6834                 struct bnx2x_fastpath *fp = &bp->fp[i];
6835
6836                 cnt = 1000;
6837                 while (bnx2x_has_tx_work_unload(fp)) {
6838
6839                         if (!cnt) {
6840                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6841                                           i);
6842 #ifdef BNX2X_STOP_ON_ERROR
6843                                 bnx2x_panic();
6844                                 return;
6845 #else
6846                                 break;
6847 #endif
6848                         }
6849                         cnt--;
6850                         msleep(1);
6851                 }
6852         }
6853         /* Give HW time to discard old tx messages */
6854         msleep(1);
6855
6856         if (CHIP_IS_E1(bp)) {
6857                 /* invalidate mc list,
6858                  * wait and poll (interrupts are off)
6859                  */
6860                 bnx2x_invlidate_e1_mc_list(bp);
6861                 bnx2x_set_eth_mac(bp, 0);
6862
6863         } else {
6864                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6865
6866                 bnx2x_set_eth_mac(bp, 0);
6867
6868                 for (i = 0; i < MC_HASH_SIZE; i++)
6869                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6870         }
6871
6872 #ifdef BCM_CNIC
6873         /* Clear iSCSI L2 MAC */
6874         mutex_lock(&bp->cnic_mutex);
6875         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6876                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6877                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6878         }
6879         mutex_unlock(&bp->cnic_mutex);
6880 #endif
6881
6882         if (unload_mode == UNLOAD_NORMAL)
6883                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6884
6885         else if (bp->flags & NO_WOL_FLAG)
6886                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6887
6888         else if (bp->wol) {
6889                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6890                 u8 *mac_addr = bp->dev->dev_addr;
6891                 u32 val;
6892                 /* The MAC address is written to entries 1-4 to
6893                    preserve entry 0, which is used by the PMF */
6894                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6895
6896                 val = (mac_addr[0] << 8) | mac_addr[1];
6897                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6898
6899                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6900                       (mac_addr[4] << 8) | mac_addr[5];
6901                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
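                /* Each EMAC MAC_MATCH entry is 8 bytes (two 32-bit
                 * registers), hence the (vn + 1) * 8 byte offset above.
                 */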
6902
6903                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6904
6905         } else
6906                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6907
6908         /* Close multi and leading connections.
6909            Completions for ramrods are collected in a synchronous way */
6910         for_each_queue(bp, i)
6911
6912                 if (bnx2x_stop_client(bp, i))
6913 #ifdef BNX2X_STOP_ON_ERROR
6914                         return;
6915 #else
6916                         goto unload_error;
6917 #endif
6918
6919         rc = bnx2x_func_stop(bp);
6920         if (rc) {
6921                 BNX2X_ERR("Function stop failed!\n");
6922 #ifdef BNX2X_STOP_ON_ERROR
6923                 return;
6924 #else
6925                 goto unload_error;
6926 #endif
6927         }
6928 #ifndef BNX2X_STOP_ON_ERROR
6929 unload_error:
6930 #endif
6931         if (!BP_NOMCP(bp))
6932                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6933         else {
6934                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
6935                                      "%d, %d, %d\n", BP_PATH(bp),
6936                    load_count[BP_PATH(bp)][0],
6937                    load_count[BP_PATH(bp)][1],
6938                    load_count[BP_PATH(bp)][2]);
6939                 load_count[BP_PATH(bp)][0]--;
6940                 load_count[BP_PATH(bp)][1 + port]--;
6941                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
6942                                      "%d, %d, %d\n", BP_PATH(bp),
6943                    load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6944                    load_count[BP_PATH(bp)][2]);
6945                 if (load_count[BP_PATH(bp)][0] == 0)
6946                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6947                 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6948                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6949                 else
6950                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6951         }
6952
6953         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6954             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6955                 bnx2x__link_reset(bp);
6956
6957         /* Disable HW interrupts, NAPI */
6958         bnx2x_netif_stop(bp, 1);
6959
6960         /* Release IRQs */
6961         bnx2x_free_irq(bp);
6962
6963         /* Reset the chip */
6964         bnx2x_reset_chip(bp, reset_code);
6965
6966         /* Report UNLOAD_DONE to MCP */
6967         if (!BP_NOMCP(bp))
6968                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6969
6970 }
6971
6972 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6973 {
6974         u32 val;
6975
6976         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6977
6978         if (CHIP_IS_E1(bp)) {
6979                 int port = BP_PORT(bp);
6980                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6981                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
6982
6983                 val = REG_RD(bp, addr);
6984                 val &= ~(0x300);
6985                 REG_WR(bp, addr, val);
6986         } else if (CHIP_IS_E1H(bp)) {
6987                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6988                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6989                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6990                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6991         }
6992 }
6993
6994 /* Close gates #2, #3 and #4: */
6995 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6996 {
6997         u32 val, addr;
6998
6999         /* Gates #2 and #4a are closed/opened for "not E1" only */
7000         if (!CHIP_IS_E1(bp)) {
7001                 /* #4 */
7002                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7003                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7004                        close ? (val | 0x1) : (val & (~(u32)1)));
7005                 /* #2 */
7006                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7007                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7008                        close ? (val | 0x1) : (val & (~(u32)1)));
7009         }
7010
7011         /* #3 (note the inverted polarity: bit 0 is *set* to open this gate) */
7012         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7013         val = REG_RD(bp, addr);
7014         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7015
7016         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7017                 close ? "closing" : "opening");
7018         mmiowb();
7019 }
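/* bnx2x_process_kill() below closes these gates just before the chip reset
 * to stop host-side doorbells and internal writes, and reopens them once
 * the MCP has come back up.
 */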
7020
7021 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
7022
7023 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7024 {
7025         /* Save the current `magic' bit value and then set it */
7026         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7027         *magic_val = val & SHARED_MF_CLP_MAGIC;
7028         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7029 }
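/* Setting the `magic' bit presumably tells the management firmware that
 * the MF configuration must survive the coming MCP reset;
 * bnx2x_clp_reset_done() restores the original value afterwards.
 */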
7030
7031 /* Restore the value of the `magic' bit.
7032  *
7033  * @param bp Driver handle.
7034  * @param magic_val Old value of the `magic' bit.
7035  */
7036 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7037 {
7038         /* Restore the `magic' bit value... */
7039         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7040         MF_CFG_WR(bp, shared_mf_config.clp_mb,
7041                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7042 }
7043
7044 /**
7045  * Prepares for MCP reset: takes care of CLP configurations.
7046  *
7047  * @param bp
7048  * @param magic_val Pointer where the old value of the 'magic' bit is saved.
7049  */
7050 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7051 {
7052         u32 shmem;
7053         u32 validity_offset;
7054
7055         DP(NETIF_MSG_HW, "Starting\n");
7056
7057         /* Set `magic' bit in order to save MF config */
7058         if (!CHIP_IS_E1(bp))
7059                 bnx2x_clp_reset_prep(bp, magic_val);
7060
7061         /* Get shmem offset */
7062         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7063         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7064
7065         /* Clear validity map flags */
7066         if (shmem > 0)
7067                 REG_WR(bp, shmem + validity_offset, 0);
7068 }
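/* With the validity flags cleared the shared memory is marked stale;
 * bnx2x_reset_mcp_comp() below polls until the MCP rewrites the
 * DEV_INFO/MB validity signature, which indicates that it is up again.
 */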
7069
7070 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
7071 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
7072
7073 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7074  * depending on whether this is a slow (emulation/FPGA) chip revision.
7075  *
7076  * @param bp
7077  */
7078 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7079 {
7080         /* special handling for emulation and FPGA,
7081            wait 10 times longer */
7082         if (CHIP_REV_IS_SLOW(bp))
7083                 msleep(MCP_ONE_TIMEOUT*10);
7084         else
7085                 msleep(MCP_ONE_TIMEOUT);
7086 }
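/* Worst case, bnx2x_reset_mcp_comp() below performs
 * MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 such waits: about 5 seconds on an
 * ASIC, or about 50 seconds on emulation/FPGA.
 */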
7087
7088 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7089 {
7090         u32 shmem, cnt, validity_offset, val;
7091         int rc = 0;
7092
7093         msleep(100);
7094
7095         /* Get shmem offset */
7096         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7097         if (shmem == 0) {
7098                 BNX2X_ERR("Shmem 0 return failure\n");
7099                 rc = -ENOTTY;
7100                 goto exit_lbl;
7101         }
7102
7103         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7104
7105         /* Wait for MCP to come up */
7106         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7107                 /* TBD: it's best to check the validity map of the last
7108                  * port; currently checks on port 0.
7109                  */
7110                 val = REG_RD(bp, shmem + validity_offset);
7111                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7112                    shmem + validity_offset, val);
7113
7114                 /* check that shared memory is valid. */
7115                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7116                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7117                         break;
7118
7119                 bnx2x_mcp_wait_one(bp);
7120         }
7121
7122         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7123
7124         /* Check that shared memory is valid. This indicates that MCP is up. */
7125         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7126             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7127                 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7128                 rc = -ENOTTY;
7129                 goto exit_lbl;
7130         }
7131
7132 exit_lbl:
7133         /* Restore the `magic' bit value */
7134         if (!CHIP_IS_E1(bp))
7135                 bnx2x_clp_reset_done(bp, magic_val);
7136
7137         return rc;
7138 }
7139
7140 static void bnx2x_pxp_prep(struct bnx2x *bp)
7141 {
7142         if (!CHIP_IS_E1(bp)) {
7143                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7144                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7145                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7146                 mmiowb();
7147         }
7148 }
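/* Clearing RD_START_INIT, RQ_RBC_DONE and RQ_CFG_DONE presumably rewinds
 * the PXP2 init handshake so that the block goes through a clean
 * re-initialization after the chip reset.
 */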
7149
7150 /*
7151  * Reset the whole chip except for:
7152  *      - PCIE core
7153  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7154  *              one reset bit)
7155  *      - IGU
7156  *      - MISC (including AEU)
7157  *      - GRC
7158  *      - RBCN, RBCP
7159  */
7160 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7161 {
7162         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7163
7164         not_reset_mask1 =
7165                 MISC_REGISTERS_RESET_REG_1_RST_HC |
7166                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7167                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7168
7169         not_reset_mask2 =
7170                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7171                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7172                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7173                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7174                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7175                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
7176                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7177                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7178
7179         reset_mask1 = 0xffffffff;
7180
7181         if (CHIP_IS_E1(bp))
7182                 reset_mask2 = 0xffff;
7183         else
7184                 reset_mask2 = 0x1ffff;
7185
7186         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7187                reset_mask1 & (~not_reset_mask1));
7188         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7189                reset_mask2 & (~not_reset_mask2));
7190
7191         barrier();
7192         mmiowb();
7193
7194         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7195         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7196         mmiowb();
7197 }
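/* Note the register pairing: writing a mask to a RESET_REG_*_CLEAR address
 * puts the corresponding blocks into reset, while writing to the matching
 * RESET_REG_*_SET address releases them (compare the "take the NIG out of
 * reset" sequence in bnx2x_undi_unload() below).
 */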
7198
7199 static int bnx2x_process_kill(struct bnx2x *bp)
7200 {
7201         int cnt = 1000;
7202         u32 val = 0;
7203         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7204
7205
7206         /* Empty the Tetris buffer, wait for 1s */
7207         do {
7208                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7209                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7210                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7211                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7212                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7213                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7214                     ((port_is_idle_0 & 0x1) == 0x1) &&
7215                     ((port_is_idle_1 & 0x1) == 0x1) &&
7216                     (pgl_exp_rom2 == 0xffffffff))
7217                         break;
7218                 msleep(1);
7219         } while (cnt-- > 0);
7220
7221         if (cnt <= 0) {
7222                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7223                           " are still"
7224                           " outstanding read requests after 1s!\n");
7225                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7226                           " port_is_idle_0=0x%08x,"
7227                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7228                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7229                           pgl_exp_rom2);
7230                 return -EAGAIN;
7231         }
7232
7233         barrier();
7234
7235         /* Close gates #2, #3 and #4 */
7236         bnx2x_set_234_gates(bp, true);
7237
7238         /* TBD: Indicate that "process kill" is in progress to MCP */
7239
7240         /* Clear "unprepared" bit */
7241         REG_WR(bp, MISC_REG_UNPREPARED, 0);
7242         barrier();
7243
7244         /* Make sure all is written to the chip before the reset */
7245         mmiowb();
7246
7247         /* Wait for 1ms to empty GLUE and PCI-E core queues,
7248          * PSWHST, GRC and PSWRD Tetris buffer.
7249          */
7250         msleep(1);
7251
7252         /* Prepare for chip reset: */
7253         /* MCP */
7254         bnx2x_reset_mcp_prep(bp, &val);
7255
7256         /* PXP */
7257         bnx2x_pxp_prep(bp);
7258         barrier();
7259
7260         /* reset the chip */
7261         bnx2x_process_kill_chip_reset(bp);
7262         barrier();
7263
7264         /* Recover after reset: */
7265         /* MCP */
7266         if (bnx2x_reset_mcp_comp(bp, val))
7267                 return -EAGAIN;
7268
7269         /* PXP */
7270         bnx2x_pxp_prep(bp);
7271
7272         /* Open the gates #2, #3 and #4 */
7273         bnx2x_set_234_gates(bp, false);
7274
7275         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7276          * reset state, re-enable attentions. */
7277
7278         return 0;
7279 }
7280
7281 static int bnx2x_leader_reset(struct bnx2x *bp)
7282 {
7283         int rc = 0;
7284         /* Try to recover after the failure */
7285         if (bnx2x_process_kill(bp)) {
7286                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7287                        bp->dev->name);
7288                 rc = -EAGAIN;
7289                 goto exit_leader_reset;
7290         }
7291
7292         /* Clear "reset is in progress" bit and update the driver state */
7293         bnx2x_set_reset_done(bp);
7294         bp->recovery_state = BNX2X_RECOVERY_DONE;
7295
7296 exit_leader_reset:
7297         bp->is_leader = 0;
7298         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7299         smp_wmb();
7300         return rc;
7301 }
7302
7303 /* Assumption: runs under rtnl lock. This, together with the fact
7304  * that it's called only from bnx2x_reset_task(), ensures that it
7305  * will never be called when netif_running(bp->dev) is false.
7306  */
7307 static void bnx2x_parity_recover(struct bnx2x *bp)
7308 {
7309         DP(NETIF_MSG_HW, "Handling parity\n");
7310         while (1) {
7311                 switch (bp->recovery_state) {
7312                 case BNX2X_RECOVERY_INIT:
7313                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7314                         /* Try to get a LEADER_LOCK HW lock */
7315                         if (bnx2x_trylock_hw_lock(bp,
7316                                 HW_LOCK_RESOURCE_RESERVED_08))
7317                                 bp->is_leader = 1;
7318
7319                         /* Stop the driver */
7320                         /* If interface has been removed - break */
7321                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7322                                 return;
7323
7324                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
7325                         /* Ensure "is_leader" and "recovery_state"
7326                          *  update values are seen on other CPUs
7327                          */
7328                         smp_wmb();
7329                         break;
7330
7331                 case BNX2X_RECOVERY_WAIT:
7332                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7333                         if (bp->is_leader) {
7334                                 u32 load_counter = bnx2x_get_load_cnt(bp);
7335                                 if (load_counter) {
7336                                         /* Wait until all other functions get
7337                                          * down.
7338                                          */
7339                                         schedule_delayed_work(&bp->reset_task,
7340                                                                 HZ/10);
7341                                         return;
7342                                 } else {
7343                                         /* If all other functions got down -
7344                                          * try to bring the chip back to
7345                                          * normal. In any case it's an exit
7346                                          * point for a leader.
7347                                          */
7348                                         if (bnx2x_leader_reset(bp) ||
7349                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
7350                                                 printk(KERN_ERR "%s: Recovery "
7351                                                 "has failed. Power cycle is "
7352                                                 "needed.\n", bp->dev->name);
7353                                                 /* Disconnect this device */
7354                                                 netif_device_detach(bp->dev);
7355                                                 /* Block ifup for all function
7356                                                  * of this ASIC until
7357                                                  * "process kill" or power
7358                                                  * cycle.
7359                                                  */
7360                                                 bnx2x_set_reset_in_progress(bp);
7361                                                 /* Shut down the power */
7362                                                 bnx2x_set_power_state(bp,
7363                                                                 PCI_D3hot);
7364                                                 return;
7365                                         }
7366
7367                                         return;
7368                                 }
7369                         } else { /* non-leader */
7370                                 if (!bnx2x_reset_is_done(bp)) {
7371                                         /* Try to get the LEADER_LOCK HW
7372                                          * lock, since a former leader may
7373                                          * have been unloaded by the user
7374                                          * or may have released leadership
7375                                          * for some other reason.
7376                                          */
7377                                         if (bnx2x_trylock_hw_lock(bp,
7378                                             HW_LOCK_RESOURCE_RESERVED_08)) {
7379                                                 /* I'm a leader now! Restart a
7380                                                  * switch case.
7381                                                  */
7382                                                 bp->is_leader = 1;
7383                                                 break;
7384                                         }
7385
7386                                         schedule_delayed_work(&bp->reset_task,
7387                                                                 HZ/10);
7388                                         return;
7389
7390                                 } else { /* A leader has completed
7391                                           * the "process kill". It's an exit
7392                                           * point for a non-leader.
7393                                           */
7394                                         bnx2x_nic_load(bp, LOAD_NORMAL);
7395                                         bp->recovery_state =
7396                                                 BNX2X_RECOVERY_DONE;
7397                                         smp_wmb();
7398                                         return;
7399                                 }
7400                         }
7401                 default:
7402                         return;
7403                 }
7404         }
7405 }
7406
7407 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
7408  * scheduled on a general queue in order to prevent a deadlock.
7409  */
7410 static void bnx2x_reset_task(struct work_struct *work)
7411 {
7412         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7413
7414 #ifdef BNX2X_STOP_ON_ERROR
7415         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7416                   " so reset not done to allow debug dump,\n"
7417          KERN_ERR " you will need to reboot when done\n");
7418         return;
7419 #endif
7420
7421         rtnl_lock();
7422
7423         if (!netif_running(bp->dev))
7424                 goto reset_task_exit;
7425
7426         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7427                 bnx2x_parity_recover(bp);
7428         else {
7429                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7430                 bnx2x_nic_load(bp, LOAD_NORMAL);
7431         }
7432
7433 reset_task_exit:
7434         rtnl_unlock();
7435 }
7436
7437 /* end of nic load/unload */
7438
7439 /*
7440  * Init service functions
7441  */
7442
7443 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7444 {
7445         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7446         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7447         return base + (BP_ABS_FUNC(bp)) * stride;
7448 }
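/* The per-function "pretend" register address is computed as the F0 base
 * plus the absolute function number times the F0->F1 stride.  Writing a
 * function number there makes subsequent GRC accesses be issued as that
 * function, as bnx2x_undi_int_disable_e1h() below demonstrates.
 */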
7449
7450 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7451 {
7452         u32 reg = bnx2x_get_pretend_reg(bp);
7453
7454         /* Flush all outstanding writes */
7455         mmiowb();
7456
7457         /* Pretend to be function 0 */
7458         REG_WR(bp, reg, 0);
7459         REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
7460
7461         /* From now we are in the "like-E1" mode */
7462         bnx2x_int_disable(bp);
7463
7464         /* Flush all outstanding writes */
7465         mmiowb();
7466
7467         /* Restore the original function */
7468         REG_WR(bp, reg, BP_ABS_FUNC(bp));
7469         REG_RD(bp, reg);
7470 }
7471
7472 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7473 {
7474         if (CHIP_IS_E1(bp))
7475                 bnx2x_int_disable(bp);
7476         else
7477                 bnx2x_undi_int_disable_e1h(bp);
7478 }
7479
7480 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7481 {
7482         u32 val;
7483
7484         /* Check if there is any driver already loaded */
7485         val = REG_RD(bp, MISC_REG_UNPREPARED);
7486         if (val == 0x1) {
7487                 /* Check if it is the UNDI driver:
7488                  * UNDI initializes the CID offset for the normal doorbell to 0x7
7489                  */
7490                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7491                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7492                 if (val == 0x7) {
7493                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7494                         /* save our pf_num */
7495                         int orig_pf_num = bp->pf_num;
7496                         u32 swap_en;
7497                         u32 swap_val;
7498
7499                         /* clear the UNDI indication */
7500                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7501
7502                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7503
7504                         /* try unload UNDI on port 0 */
7505                         bp->pf_num = 0;
7506                         bp->fw_seq =
7507                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7508                                 DRV_MSG_SEQ_NUMBER_MASK);
7509                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
7510
7511                         /* if UNDI is loaded on the other port */
7512                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7513
7514                                 /* send "DONE" for previous unload */
7515                                 bnx2x_fw_command(bp,
7516                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
7517
7518                                 /* unload UNDI on port 1 */
7519                                 bp->pf_num = 1;
7520                                 bp->fw_seq =
7521                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7522                                         DRV_MSG_SEQ_NUMBER_MASK);
7523                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7524
7525                                 bnx2x_fw_command(bp, reset_code, 0);
7526                         }
7527
7528                         /* now it's safe to release the lock */
7529                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7530
7531                         bnx2x_undi_int_disable(bp);
7532
7533                         /* close input traffic and wait for it */
7534                         /* Do not rcv packets to BRB */
7535                         REG_WR(bp,
7536                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7537                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7538                         /* Do not direct rcv packets that are not for MCP to
7539                          * the BRB */
7540                         REG_WR(bp,
7541                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7542                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7543                         /* clear AEU */
7544                         REG_WR(bp,
7545                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7546                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7547                         msleep(10);
7548
7549                         /* save NIG port swap info */
7550                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7551                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7552                         /* reset device */
7553                         REG_WR(bp,
7554                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7555                                0xd3ffffff);
7556                         REG_WR(bp,
7557                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7558                                0x1403);
7559                         /* take the NIG out of reset and restore swap values */
7560                         REG_WR(bp,
7561                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7562                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7563                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7564                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7565
7566                         /* send unload done to the MCP */
7567                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7568
7569                         /* restore our func and fw_seq */
7570                         bp->pf_num = orig_pf_num;
7571                         bp->fw_seq =
7572                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7573                                 DRV_MSG_SEQ_NUMBER_MASK);
7574                 } else
7575                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7576         }
7577 }
7578
7579 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7580 {
7581         u32 val, val2, val3, val4, id;
7582         u16 pmc;
7583
7584         /* Get the chip revision id and number. */
7585         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7586         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7587         id = ((val & 0xffff) << 16);
7588         val = REG_RD(bp, MISC_REG_CHIP_REV);
7589         id |= ((val & 0xf) << 12);
7590         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7591         id |= ((val & 0xff) << 4);
7592         val = REG_RD(bp, MISC_REG_BOND_ID);
7593         id |= (val & 0xf);
7594         bp->common.chip_id = id;
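        /* Example (hypothetical value): a chip_id of 0x164e1014 would
         * decode as chip num 0x164e, rev 0x1, metal 0x01, bond_id 0x4.
         */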
7595
7596         /* Set doorbell size */
7597         bp->db_size = (1 << BNX2X_DB_SHIFT);
7598
7599         if (CHIP_IS_E2(bp)) {
7600                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7601                 if ((val & 1) == 0)
7602                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7603                 else
7604                         val = (val >> 1) & 1;
7605                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7606                                                        "2_PORT_MODE");
7607                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7608                                                  CHIP_2_PORT_MODE;
7609
7610                 if (CHIP_MODE_IS_4_PORT(bp))
7611                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
7612                 else
7613                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
7614         } else {
7615                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7616                 bp->pfid = bp->pf_num;                  /* 0..7 */
7617         }
7618
7619         /*
7620          * Set the base FW non-default (fast path) status block ID. This value
7621          * is used to initialize the fw_sb_id saved on the fp/queue structure,
7622          * which determines the ID used by the FW.
7623          */
7624         if (CHIP_IS_E1x(bp))
7625                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7626         else /* E2 */
7627                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7628
7629         bp->link_params.chip_id = bp->common.chip_id;
7630         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7631
7632         val = (REG_RD(bp, 0x2874) & 0x55);
7633         if ((bp->common.chip_id & 0x1) ||
7634             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7635                 bp->flags |= ONE_PORT_FLAG;
7636                 BNX2X_DEV_INFO("single port device\n");
7637         }
7638
7639         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7640         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7641                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7642         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7643                        bp->common.flash_size, bp->common.flash_size);
7644
7645         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7646         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7647                                         MISC_REG_GENERIC_CR_1 :
7648                                         MISC_REG_GENERIC_CR_0));
7649         bp->link_params.shmem_base = bp->common.shmem_base;
7650         bp->link_params.shmem2_base = bp->common.shmem2_base;
7651         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7652                        bp->common.shmem_base, bp->common.shmem2_base);
7653
7654         if (!bp->common.shmem_base) {
7655                 BNX2X_DEV_INFO("MCP not active\n");
7656                 bp->flags |= NO_MCP_FLAG;
7657                 return;
7658         }
7659
7660         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7661         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7662                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7663                 BNX2X_ERR("BAD MCP validity signature\n");
7664
7665         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7666         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7667
7668         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7669                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7670                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7671
7672         bp->link_params.feature_config_flags = 0;
7673         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7674         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7675                 bp->link_params.feature_config_flags |=
7676                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7677         else
7678                 bp->link_params.feature_config_flags &=
7679                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7680
7681         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7682         bp->common.bc_ver = val;
7683         BNX2X_DEV_INFO("bc_ver %X\n", val);
7684         if (val < BNX2X_BC_VER) {
7685                 /* for now only warn;
7686                  * later we might need to enforce this */
7687                 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7688                           "please upgrade BC\n", BNX2X_BC_VER, val);
7689         }
7690         bp->link_params.feature_config_flags |=
7691                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7692                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7693
7694         bp->link_params.feature_config_flags |=
7695                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7696                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7697
7698         if (BP_E1HVN(bp) == 0) {
7699                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7700                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7701         } else {
7702                 /* no WOL capability for E1HVN != 0 */
7703                 bp->flags |= NO_WOL_FLAG;
7704         }
7705         BNX2X_DEV_INFO("%sWoL capable\n",
7706                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7707
7708         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7709         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7710         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7711         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7712
7713         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7714                  val, val2, val3, val4);
7715 }
7716
7717 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7718 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7719
7720 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7721 {
7722         int pfid = BP_FUNC(bp);
7723         int vn = BP_E1HVN(bp);
7724         int igu_sb_id;
7725         u32 val;
7726         u8 fid;
7727
7728         bp->igu_base_sb = 0xff;
7729         bp->igu_sb_cnt = 0;
7730         if (CHIP_INT_MODE_IS_BC(bp)) {
7731                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7732                                        bp->l2_cid_count);
7733
7734                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7735                         FP_SB_MAX_E1x;
7736
7737                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
7738                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7739
7740                 return;
7741         }
7742
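        /* Each 32-bit CAM entry maps one IGU status block to a function ID
         * and vector; for this PF, vector 0 is the default status block and
         * every other valid vector is counted as a fast-path SB.
         */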
7743         /* IGU in normal mode - read CAM */
7744         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7745              igu_sb_id++) {
7746                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7747                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7748                         continue;
7749                 fid = IGU_FID(val);
7750                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7751                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7752                                 continue;
7753                         if (IGU_VEC(val) == 0)
7754                                 /* default status block */
7755                                 bp->igu_dsb_id = igu_sb_id;
7756                         else {
7757                                 if (bp->igu_base_sb == 0xff)
7758                                         bp->igu_base_sb = igu_sb_id;
7759                                 bp->igu_sb_cnt++;
7760                         }
7761                 }
7762         }
7763         bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7764         if (bp->igu_sb_cnt == 0)
7765                 BNX2X_ERR("CAM configuration error\n");
7766 }
7767
7768 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7769                                                     u32 switch_cfg)
7770 {
7771         int cfg_size = 0, idx, port = BP_PORT(bp);
7772
7773         /* Aggregation of supported attributes of all external phys */
7774         bp->port.supported[0] = 0;
7775         bp->port.supported[1] = 0;
7776         switch (bp->link_params.num_phys) {
7777         case 1:
7778                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7779                 cfg_size = 1;
7780                 break;
7781         case 2:
7782                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7783                 cfg_size = 1;
7784                 break;
7785         case 3:
7786                 if (bp->link_params.multi_phy_config &
7787                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7788                         bp->port.supported[1] =
7789                                 bp->link_params.phy[EXT_PHY1].supported;
7790                         bp->port.supported[0] =
7791                                 bp->link_params.phy[EXT_PHY2].supported;
7792                 } else {
7793                         bp->port.supported[0] =
7794                                 bp->link_params.phy[EXT_PHY1].supported;
7795                         bp->port.supported[1] =
7796                                 bp->link_params.phy[EXT_PHY2].supported;
7797                 }
7798                 cfg_size = 2;
7799                 break;
7800         }
7801
7802         if (!(bp->port.supported[0] || bp->port.supported[1])) {
7803                 BNX2X_ERR("NVRAM config error. BAD phy config."
7804                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
7805                            SHMEM_RD(bp,
7806                            dev_info.port_hw_config[port].external_phy_config),
7807                            SHMEM_RD(bp,
7808                            dev_info.port_hw_config[port].external_phy_config2));
7809                 return;
7810         }
7811
7812         switch (switch_cfg) {
7813         case SWITCH_CFG_1G:
7814                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7815                                            port*0x10);
7816                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7817                 break;
7818
7819         case SWITCH_CFG_10G:
7820                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7821                                            port*0x18);
7822                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7823                 break;
7824
7825         default:
7826                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7827                           bp->port.link_config[0]);
7828                 return;
7829         }
7830         /* mask what we support according to speed_cap_mask per configuration */
7831         for (idx = 0; idx < cfg_size; idx++) {
7832                 if (!(bp->link_params.speed_cap_mask[idx] &
7833                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7834                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
7835
7836                 if (!(bp->link_params.speed_cap_mask[idx] &
7837                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7838                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
7839
7840                 if (!(bp->link_params.speed_cap_mask[idx] &
7841                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7842                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
7843
7844                 if (!(bp->link_params.speed_cap_mask[idx] &
7845                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7846                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
7847
7848                 if (!(bp->link_params.speed_cap_mask[idx] &
7849                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7850                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
7851                                                      SUPPORTED_1000baseT_Full);
7852
7853                 if (!(bp->link_params.speed_cap_mask[idx] &
7854                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7855                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
7856
7857                 if (!(bp->link_params.speed_cap_mask[idx] &
7858                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7859                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7860
7861         }
7862
7863         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7864                        bp->port.supported[1]);
7865 }
7866
7867 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7868 {
7869         u32 link_config, idx, cfg_size = 0;
7870         bp->port.advertising[0] = 0;
7871         bp->port.advertising[1] = 0;
7872         switch (bp->link_params.num_phys) {
7873         case 1:
7874         case 2:
7875                 cfg_size = 1;
7876                 break;
7877         case 3:
7878                 cfg_size = 2;
7879                 break;
7880         }
7881         for (idx = 0; idx < cfg_size; idx++) {
7882                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7883                 link_config = bp->port.link_config[idx];
7884                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7885                 case PORT_FEATURE_LINK_SPEED_AUTO:
7886                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7887                                 bp->link_params.req_line_speed[idx] =
7888                                         SPEED_AUTO_NEG;
7889                                 bp->port.advertising[idx] |=
7890                                         bp->port.supported[idx];
7891                         } else {
7892                                 /* force 10G, no AN */
7893                                 bp->link_params.req_line_speed[idx] =
7894                                         SPEED_10000;
7895                                 bp->port.advertising[idx] |=
7896                                         (ADVERTISED_10000baseT_Full |
7897                                          ADVERTISED_FIBRE);
7898                                 continue;
7899                         }
7900                         break;
7901
7902                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7903                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7904                                 bp->link_params.req_line_speed[idx] =
7905                                         SPEED_10;
7906                                 bp->port.advertising[idx] |=
7907                                         (ADVERTISED_10baseT_Full |
7908                                          ADVERTISED_TP);
7909                         } else {
7910                                 BNX2X_ERROR("NVRAM config error. "
7911                                             "Invalid link_config 0x%x"
7912                                             "  speed_cap_mask 0x%x\n",
7913                                             link_config,
7914                                     bp->link_params.speed_cap_mask[idx]);
7915                                 return;
7916                         }
7917                         break;
7918
7919                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7920                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7921                                 bp->link_params.req_line_speed[idx] =
7922                                         SPEED_10;
7923                                 bp->link_params.req_duplex[idx] =
7924                                         DUPLEX_HALF;
7925                                 bp->port.advertising[idx] |=
7926                                         (ADVERTISED_10baseT_Half |
7927                                          ADVERTISED_TP);
7928                         } else {
7929                                 BNX2X_ERROR("NVRAM config error. "
7930                                             "Invalid link_config 0x%x"
7931                                             "  speed_cap_mask 0x%x\n",
7932                                             link_config,
7933                                           bp->link_params.speed_cap_mask[idx]);
7934                                 return;
7935                         }
7936                         break;
7937
7938                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7939                         if (bp->port.supported[idx] &
7940                             SUPPORTED_100baseT_Full) {
7941                                 bp->link_params.req_line_speed[idx] =
7942                                         SPEED_100;
7943                                 bp->port.advertising[idx] |=
7944                                         (ADVERTISED_100baseT_Full |
7945                                          ADVERTISED_TP);
7946                         } else {
7947                                 BNX2X_ERROR("NVRAM config error. "
7948                                             "Invalid link_config 0x%x"
7949                                             "  speed_cap_mask 0x%x\n",
7950                                             link_config,
7951                                           bp->link_params.speed_cap_mask[idx]);
7952                                 return;
7953                         }
7954                         break;
7955
7956                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7957                         if (bp->port.supported[idx] &
7958                             SUPPORTED_100baseT_Half) {
7959                                 bp->link_params.req_line_speed[idx] =
7960                                                                 SPEED_100;
7961                                 bp->link_params.req_duplex[idx] =
7962                                                                 DUPLEX_HALF;
7963                                 bp->port.advertising[idx] |=
7964                                         (ADVERTISED_100baseT_Half |
7965                                          ADVERTISED_TP);
7966                         } else {
7967                                 BNX2X_ERROR("NVRAM config error. "
7968                                     "Invalid link_config 0x%x"
7969                                     "  speed_cap_mask 0x%x\n",
7970                                     link_config,
7971                                     bp->link_params.speed_cap_mask[idx]);
7972                                 return;
7973                         }
7974                         break;
7975
7976                 case PORT_FEATURE_LINK_SPEED_1G:
7977                         if (bp->port.supported[idx] &
7978                             SUPPORTED_1000baseT_Full) {
7979                                 bp->link_params.req_line_speed[idx] =
7980                                         SPEED_1000;
7981                                 bp->port.advertising[idx] |=
7982                                         (ADVERTISED_1000baseT_Full |
7983                                          ADVERTISED_TP);
7984                         } else {
7985                                 BNX2X_ERROR("NVRAM config error. "
7986                                     "Invalid link_config 0x%x"
7987                                     "  speed_cap_mask 0x%x\n",
7988                                     link_config,
7989                                     bp->link_params.speed_cap_mask[idx]);
7990                                 return;
7991                         }
7992                         break;
7993
7994                 case PORT_FEATURE_LINK_SPEED_2_5G:
7995                         if (bp->port.supported[idx] &
7996                             SUPPORTED_2500baseX_Full) {
7997                                 bp->link_params.req_line_speed[idx] =
7998                                         SPEED_2500;
7999                                 bp->port.advertising[idx] |=
8000                                         (ADVERTISED_2500baseX_Full |
8001                                                 ADVERTISED_TP);
8002                         } else {
8003                                 BNX2X_ERROR("NVRAM config error. "
8004                                     "Invalid link_config 0x%x"
8005                                     "  speed_cap_mask 0x%x\n",
8006                                     link_config,
8007                                     bp->link_params.speed_cap_mask[idx]);
8008                                 return;
8009                         }
8010                         break;
8011
8012                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8013                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8014                 case PORT_FEATURE_LINK_SPEED_10G_KR:
8015                         if (bp->port.supported[idx] &
8016                             SUPPORTED_10000baseT_Full) {
8017                                 bp->link_params.req_line_speed[idx] =
8018                                         SPEED_10000;
8019                                 bp->port.advertising[idx] |=
8020                                         (ADVERTISED_10000baseT_Full |
8021                                                 ADVERTISED_FIBRE);
8022                         } else {
8023                                 BNX2X_ERROR("NVRAM config error. "
8024                                     "Invalid link_config 0x%x"
8025                                     "  speed_cap_mask 0x%x\n",
8026                                     link_config,
8027                                     bp->link_params.speed_cap_mask[idx]);
8028                                 return;
8029                         }
8030                         break;
8031
8032                 default:
8033                         BNX2X_ERROR("NVRAM config error. "
8034                                     "BAD link speed link_config 0x%x\n",
8035                                     link_config);
8036                         bp->link_params.req_line_speed[idx] =
8037                                 SPEED_AUTO_NEG;
8038                         bp->port.advertising[idx] =
8039                                 bp->port.supported[idx];
8040                         break;
8041                 }
8042
8043                 bp->link_params.req_flow_ctrl[idx] = (link_config &
8044                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8045                 if ((bp->link_params.req_flow_ctrl[idx] ==
8046                      BNX2X_FLOW_CTRL_AUTO) &&
8047                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8048                         bp->link_params.req_flow_ctrl[idx] =
8049                                 BNX2X_FLOW_CTRL_NONE;
8050                 }
8051
8052                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
8053                                " 0x%x advertising 0x%x\n",
8054                                bp->link_params.req_line_speed[idx],
8055                                bp->link_params.req_duplex[idx],
8056                                bp->link_params.req_flow_ctrl[idx],
8057                                bp->port.advertising[idx]);
8058         }
8059 }
8060
8061 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8062 {
8063         mac_hi = cpu_to_be16(mac_hi);
8064         mac_lo = cpu_to_be32(mac_lo);
8065         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8066         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8067 }
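/* Example (hypothetical values): mac_hi = 0x0010 and mac_lo = 0x18010203
 * yield mac_buf = 00:10:18:01:02:03, i.e. the address ends up in network
 * (big-endian) byte order.
 */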
8068
8069 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8070 {
8071         int port = BP_PORT(bp);
8072         u32 val, val2;
8073         u32 config;
8074         u32 ext_phy_type, ext_phy_config;
8075
8076         bp->link_params.bp = bp;
8077         bp->link_params.port = port;
8078
8079         bp->link_params.lane_config =
8080                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8081
8082         bp->link_params.speed_cap_mask[0] =
8083                 SHMEM_RD(bp,
8084                          dev_info.port_hw_config[port].speed_capability_mask);
8085         bp->link_params.speed_cap_mask[1] =
8086                 SHMEM_RD(bp,
8087                          dev_info.port_hw_config[port].speed_capability_mask2);
8088         bp->port.link_config[0] =
8089                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8090
8091         bp->port.link_config[1] =
8092                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8093
8094         bp->link_params.multi_phy_config =
8095                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8096         /* If the device is capable of WoL, set the default state according
8097          * to the HW
8098          */
8099         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8100         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8101                    (config & PORT_FEATURE_WOL_ENABLED));
8102
8103         BNX2X_DEV_INFO("lane_config 0x%08x  "
8104                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
8105                        bp->link_params.lane_config,
8106                        bp->link_params.speed_cap_mask[0],
8107                        bp->port.link_config[0]);
8108
8109         bp->link_params.switch_cfg = (bp->port.link_config[0] &
8110                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8111         bnx2x_phy_probe(&bp->link_params);
8112         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8113
8114         bnx2x_link_settings_requested(bp);
8115
8116         /*
8117          * If connected directly, work with the internal PHY, otherwise, work
8118          * with the external PHY
8119          */
8120         ext_phy_config =
8121                 SHMEM_RD(bp,
8122                          dev_info.port_hw_config[port].external_phy_config);
8123         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8124         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8125                 bp->mdio.prtad = bp->port.phy_addr;
8126         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8127                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8128                 bp->mdio.prtad =
8129                         XGXS_EXT_PHY_ADDR(ext_phy_config);
8131
8132         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8133         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8134         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8135         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8136         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8137
8138 #ifdef BCM_CNIC
8139         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8140         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8141         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8142 #endif
8143 }
8144
8145 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8146 {
8147         int func = BP_ABS_FUNC(bp);
8148         int vn;
8149         u32 val, val2;
8150         int rc = 0;
8151
8152         bnx2x_get_common_hwinfo(bp);
8153
8154         if (CHIP_IS_E1x(bp)) {
8155                 bp->common.int_block = INT_BLOCK_HC;
8156
8157                 bp->igu_dsb_id = DEF_SB_IGU_ID;
8158                 bp->igu_base_sb = 0;
8159                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8160         } else {
8161                 bp->common.int_block = INT_BLOCK_IGU;
8162                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8163                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8164                         DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8165                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8166                 } else
8167                         DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8168
8169                 bnx2x_get_igu_cam_info(bp);
8170
8171         }
8172         DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
8173                              bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8174
8175         /*
8176          * Initialize MF configuration
8177          */
8178
8179         bp->mf_ov = 0;
8180         bp->mf_mode = 0;
8181         vn = BP_E1HVN(bp);
8182         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8183                 if (SHMEM2_HAS(bp, mf_cfg_addr))
8184                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8185                 else
8186                         bp->common.mf_cfg_base = bp->common.shmem_base +
8187                                 offsetof(struct shmem_region, func_mb) +
8188                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8189                 bp->mf_config[vn] =
8190                         MF_CFG_RD(bp, func_mf_config[func].config);
8191
8192                 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8193                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8194                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8195                         bp->mf_mode = 1;
8196                 BNX2X_DEV_INFO("%s function mode\n",
8197                                IS_MF(bp) ? "multi" : "single");
8198
8199                 if (IS_MF(bp)) {
8200                         val = (MF_CFG_RD(bp,
8201                                          func_mf_config[func].e1hov_tag) &
8202                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8203                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8204                                 bp->mf_ov = val;
8205                                 BNX2X_DEV_INFO("MF OV for func %d is %d "
8206                                                "(0x%04x)\n",
8207                                                func, bp->mf_ov, bp->mf_ov);
8208                         } else {
8209                                 BNX2X_ERROR("No valid MF OV for func %d,"
8210                                             "  aborting\n", func);
8211                                 rc = -EPERM;
8212                         }
8213                 } else {
8214                         if (BP_VN(bp)) {
8215                                 BNX2X_ERROR("VN %d in single function mode,"
8216                                             "  aborting\n", BP_E1HVN(bp));
8217                                 rc = -EPERM;
8218                         }
8219                 }
8220         }
8221
8222         /* adjust igu_sb_cnt to MF for E1x */
8223         if (CHIP_IS_E1x(bp) && IS_MF(bp))
8224                 bp->igu_sb_cnt /= E1HVN_MAX;
8225
8226         /*
8227          * Adjust the E2 status block count; to be removed once the FW
8228          * supports more than 16 L2 clients.
8229          */
8230 #define MAX_L2_CLIENTS                          16
8231         if (CHIP_IS_E2(bp))
8232                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8233                                        MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8234
8235         if (!BP_NOMCP(bp)) {
8236                 bnx2x_get_port_hwinfo(bp);
8237
8238                 bp->fw_seq =
8239                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8240                          DRV_MSG_SEQ_NUMBER_MASK);
8241                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8242         }
8243
8244         if (IS_MF(bp)) {
8245                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8246                 val = MF_CFG_RD(bp,  func_mf_config[func].mac_lower);
8247                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8248                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8249                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8250                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8251                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8252                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8253                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8254                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8255                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8256                                ETH_ALEN);
8257                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8258                                ETH_ALEN);
8259                 }
8260
8261                 return rc;
8262         }
8263
8264         if (BP_NOMCP(bp)) {
8265                 /* only supposed to happen on emulation/FPGA */
8266                 BNX2X_ERROR("warning: random MAC workaround active\n");
8267                 random_ether_addr(bp->dev->dev_addr);
8268                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8269         }
8270
8271         return rc;
8272 }
8273
8274 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8275 {
8276         int cnt, i, block_end, rodi;
8277         char vpd_data[BNX2X_VPD_LEN+1];
8278         char str_id_reg[VENDOR_ID_LEN+1];
8279         char str_id_cap[VENDOR_ID_LEN+1];
8280         u8 len;
8281
8282         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8283         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8284
8285         if (cnt < BNX2X_VPD_LEN)
8286                 goto out_not_found;
8287
8288         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8289                              PCI_VPD_LRDT_RO_DATA);
8290         if (i < 0)
8291                 goto out_not_found;
8292
8293
8294         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8295                     pci_vpd_lrdt_size(&vpd_data[i]);
8296
8297         i += PCI_VPD_LRDT_TAG_SIZE;
8298
8299         if (block_end > BNX2X_VPD_LEN)
8300                 goto out_not_found;
8301
8302         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8303                                    PCI_VPD_RO_KEYWORD_MFR_ID);
8304         if (rodi < 0)
8305                 goto out_not_found;
8306
8307         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8308
8309         if (len != VENDOR_ID_LEN)
8310                 goto out_not_found;
8311
8312         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8313
8314         /* vendor specific info */
8315         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8316         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8317         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8318             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8319
8320                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8321                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
8322                 if (rodi >= 0) {
8323                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8324
8325                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8326
8327                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8328                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8329                                 bp->fw_ver[len] = ' ';
8330                         }
8331                 }
8332                 return;
8333         }
8334 out_not_found:
8335         return;
8336 }
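
/*
 * For reference, a sketch of the VPD layout walked above (per the PCI
 * spec; the byte values are illustrative, not read from any device):
 *
 *   [0x90][len lo][len hi]            <- VPD-R large resource (RO data)
 *     ['M']['N'][len][vendor id...]   <- MFR_ID keyword
 *     ['V']['0'][len][fw version...]  <- VENDOR0 keyword
 *
 * Only when MFR_ID matches the Dell vendor ID is the VENDOR0 field
 * copied into bp->fw_ver.
 */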
8337
8338 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8339 {
8340         int func;
8341         int timer_interval;
8342         int rc;
8343
8344         /* Disable interrupt handling until HW is initialized */
8345         atomic_set(&bp->intr_sem, 1);
8346         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8347
8348         mutex_init(&bp->port.phy_mutex);
8349         mutex_init(&bp->fw_mb_mutex);
8350         spin_lock_init(&bp->stats_lock);
8351 #ifdef BCM_CNIC
8352         mutex_init(&bp->cnic_mutex);
8353 #endif
8354
8355         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8356         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8357
8358         rc = bnx2x_get_hwinfo(bp);
8359
8360         if (!rc)
8361                 rc = bnx2x_alloc_mem_bp(bp);
8362
8363         bnx2x_read_fwinfo(bp);
8364
8365         func = BP_FUNC(bp);
8366
8367         /* need to reset chip if undi was active */
8368         if (!BP_NOMCP(bp))
8369                 bnx2x_undi_unload(bp);
8370
8371         if (CHIP_REV_IS_FPGA(bp))
8372                 dev_err(&bp->pdev->dev, "FPGA detected\n");
8373
8374         if (BP_NOMCP(bp) && (func == 0))
8375                 dev_err(&bp->pdev->dev, "MCP disabled, "
8376                                         "must load devices in order!\n");
8377
8378         /* Set multi queue mode */
8379         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8380             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8381                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8382                                         "requested is not MSI-X\n");
8383                 multi_mode = ETH_RSS_MODE_DISABLED;
8384         }
8385         bp->multi_mode = multi_mode;
8386         bp->int_mode = int_mode;
8387
8388         bp->dev->features |= NETIF_F_GRO;
8389
8390         /* Set TPA flags */
8391         if (disable_tpa) {
8392                 bp->flags &= ~TPA_ENABLE_FLAG;
8393                 bp->dev->features &= ~NETIF_F_LRO;
8394         } else {
8395                 bp->flags |= TPA_ENABLE_FLAG;
8396                 bp->dev->features |= NETIF_F_LRO;
8397         }
8398         bp->disable_tpa = disable_tpa;
8399
8400         if (CHIP_IS_E1(bp))
8401                 bp->dropless_fc = 0;
8402         else
8403                 bp->dropless_fc = dropless_fc;
8404
8405         bp->mrrs = mrrs;
8406
8407         bp->tx_ring_size = MAX_TX_AVAIL;
8408
8409         bp->rx_csum = 1;
8410
8411         /* make sure that the numbers are in the right granularity */
8412         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8413         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8414
8415         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8416         bp->current_interval = (poll ? poll : timer_interval);
8417
8418         init_timer(&bp->timer);
8419         bp->timer.expires = jiffies + bp->current_interval;
8420         bp->timer.data = (unsigned long) bp;
8421         bp->timer.function = bnx2x_timer;
8422
8423         return rc;
8424 }
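
/*
 * Note on the coalescing defaults above: the tick values are rounded
 * down to a BNX2X_BTR multiple; e.g. (hypothetical BNX2X_BTR == 4)
 * tx_ticks = (50 / 4) * 4 == 48 and rx_ticks = (25 / 4) * 4 == 24.
 */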
8425
8426
8427 /****************************************************************************
8428 * General service functions
8429 ****************************************************************************/
8430
8431 /* called with rtnl_lock */
8432 static int bnx2x_open(struct net_device *dev)
8433 {
8434         struct bnx2x *bp = netdev_priv(dev);
8435
8436         netif_carrier_off(dev);
8437
8438         bnx2x_set_power_state(bp, PCI_D0);
8439
8440         if (!bnx2x_reset_is_done(bp)) {
8441                 do {
8442                         /* Reset the MCP mailbox sequence if there is an
8443                          * ongoing recovery
8444                          */
8445                         bp->fw_seq = 0;
8446
8447                         /* If this is the first function to load and reset
8448                          * done is still not cleared, recovery may still be
8449                          * pending. The attention state may already have been
8450                          * cleared by a "common" reset, so it is not checked
8451                          * here; we shall proceed with "process kill" anyway.
8452                          */
8453                         if ((bnx2x_get_load_cnt(bp) == 0) &&
8454                                 bnx2x_trylock_hw_lock(bp,
8455                                 HW_LOCK_RESOURCE_RESERVED_08) &&
8456                                 (!bnx2x_leader_reset(bp))) {
8457                                 DP(NETIF_MSG_HW, "Recovered in open\n");
8458                                 break;
8459                         }
8460
8461                         bnx2x_set_power_state(bp, PCI_D3hot);
8462
8463                         printk(KERN_ERR "%s: Recovery flow hasn't completed"
8464                                " yet. Try again later; if this message persists"
8465                                " after a few retries, a power cycle is"
8466                                " required.\n", bp->dev->name);
8467
8468                         return -EAGAIN;
8469                 } while (0);
8470         }
8471
8472         bp->recovery_state = BNX2X_RECOVERY_DONE;
8473
8474         return bnx2x_nic_load(bp, LOAD_OPEN);
8475 }
8476
8477 /* called with rtnl_lock */
8478 static int bnx2x_close(struct net_device *dev)
8479 {
8480         struct bnx2x *bp = netdev_priv(dev);
8481
8482         /* Unload the driver, release IRQs */
8483         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8484         bnx2x_set_power_state(bp, PCI_D3hot);
8485
8486         return 0;
8487 }
8488
8489 /* called with netif_tx_lock from dev_mcast.c */
8490 void bnx2x_set_rx_mode(struct net_device *dev)
8491 {
8492         struct bnx2x *bp = netdev_priv(dev);
8493         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8494         int port = BP_PORT(bp);
8495
8496         if (bp->state != BNX2X_STATE_OPEN) {
8497                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8498                 return;
8499         }
8500
8501         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8502
8503         if (dev->flags & IFF_PROMISC)
8504                 rx_mode = BNX2X_RX_MODE_PROMISC;
8505         else if ((dev->flags & IFF_ALLMULTI) ||
8506                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8507                   CHIP_IS_E1(bp)))
8508                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8509         else { /* some multicasts */
8510                 if (CHIP_IS_E1(bp)) {
8511                         /*
8512                          * set mc list, do not wait as wait implies sleep
8513                          * and set_rx_mode can be invoked from non-sleepable
8514                          * context
8515                          */
8516                         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8517                                      BNX2X_MAX_EMUL_MULTI*(1 + port) :
8518                                      BNX2X_MAX_MULTICAST*(1 + port));
8519
8520                         bnx2x_set_e1_mc_list(bp, offset);
8521                 } else { /* E1H */
8522                         /* Accept one or more multicasts */
8523                         struct netdev_hw_addr *ha;
8524                         u32 mc_filter[MC_HASH_SIZE];
8525                         u32 crc, bit, regidx;
8526                         int i;
8527
8528                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8529
8530                         netdev_for_each_mc_addr(ha, dev) {
8531                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8532                                    bnx2x_mc_addr(ha));
8533
8534                                 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8535                                                 ETH_ALEN);
8536                                 bit = (crc >> 24) & 0xff;
8537                                 regidx = bit >> 5;
8538                                 bit &= 0x1f;
8539                                 mc_filter[regidx] |= (1 << bit);
8540                         }
8541
8542                         for (i = 0; i < MC_HASH_SIZE; i++)
8543                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8544                                        mc_filter[i]);
8545                 }
8546         }
8547
8548         bp->rx_mode = rx_mode;
8549         bnx2x_set_storm_rx_mode(bp);
8550 }
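
/*
 * Sketch of the E1H multicast hash used above: the top byte of the
 * little-endian CRC32C of the 6-byte address picks one of 256 filter
 * bits, stored as MC_HASH_SIZE 32-bit registers:
 *
 *	crc = crc32c_le(0, addr, ETH_ALEN);
 *	bit = (crc >> 24) & 0xff;	-- 0..255
 *	regidx = bit >> 5;		-- which of the MC_HASH_SIZE words
 *	bit &= 0x1f;			-- bit within that word
 */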
8551
8552 /* called with rtnl_lock */
8553 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8554                            int devad, u16 addr)
8555 {
8556         struct bnx2x *bp = netdev_priv(netdev);
8557         u16 value;
8558         int rc;
8559
8560         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8561            prtad, devad, addr);
8562
8563         /* The HW expects different devad if CL22 is used */
8564         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8565
8566         bnx2x_acquire_phy_lock(bp);
8567         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8568         bnx2x_release_phy_lock(bp);
8569         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8570
8571         if (!rc)
8572                 rc = value;
8573         return rc;
8574 }
8575
8576 /* called with rtnl_lock */
8577 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8578                             u16 addr, u16 value)
8579 {
8580         struct bnx2x *bp = netdev_priv(netdev);
8581         int rc;
8582
8583         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8584                            " value 0x%x\n", prtad, devad, addr, value);
8585
8586         /* The HW expects different devad if CL22 is used */
8587         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8588
8589         bnx2x_acquire_phy_lock(bp);
8590         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8591         bnx2x_release_phy_lock(bp);
8592         return rc;
8593 }
8594
8595 /* called with rtnl_lock */
8596 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8597 {
8598         struct bnx2x *bp = netdev_priv(dev);
8599         struct mii_ioctl_data *mdio = if_mii(ifr);
8600
8601         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8602            mdio->phy_id, mdio->reg_num, mdio->val_in);
8603
8604         if (!netif_running(dev))
8605                 return -EAGAIN;
8606
8607         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8608 }
8609
8610 #ifdef CONFIG_NET_POLL_CONTROLLER
8611 static void poll_bnx2x(struct net_device *dev)
8612 {
8613         struct bnx2x *bp = netdev_priv(dev);
8614
8615         disable_irq(bp->pdev->irq);
8616         bnx2x_interrupt(bp->pdev->irq, dev);
8617         enable_irq(bp->pdev->irq);
8618 }
8619 #endif
8620
8621 static const struct net_device_ops bnx2x_netdev_ops = {
8622         .ndo_open               = bnx2x_open,
8623         .ndo_stop               = bnx2x_close,
8624         .ndo_start_xmit         = bnx2x_start_xmit,
8625         .ndo_set_multicast_list = bnx2x_set_rx_mode,
8626         .ndo_set_mac_address    = bnx2x_change_mac_addr,
8627         .ndo_validate_addr      = eth_validate_addr,
8628         .ndo_do_ioctl           = bnx2x_ioctl,
8629         .ndo_change_mtu         = bnx2x_change_mtu,
8630         .ndo_tx_timeout         = bnx2x_tx_timeout,
8631 #ifdef CONFIG_NET_POLL_CONTROLLER
8632         .ndo_poll_controller    = poll_bnx2x,
8633 #endif
8634 };
8635
8636 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8637                                     struct net_device *dev)
8638 {
8639         struct bnx2x *bp;
8640         int rc;
8641
8642         SET_NETDEV_DEV(dev, &pdev->dev);
8643         bp = netdev_priv(dev);
8644
8645         bp->dev = dev;
8646         bp->pdev = pdev;
8647         bp->flags = 0;
8648         bp->pf_num = PCI_FUNC(pdev->devfn);
8649
8650         rc = pci_enable_device(pdev);
8651         if (rc) {
8652                 dev_err(&bp->pdev->dev,
8653                         "Cannot enable PCI device, aborting\n");
8654                 goto err_out;
8655         }
8656
8657         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8658                 dev_err(&bp->pdev->dev,
8659                         "Cannot find PCI device base address, aborting\n");
8660                 rc = -ENODEV;
8661                 goto err_out_disable;
8662         }
8663
8664         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8665                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8666                        " base address, aborting\n");
8667                 rc = -ENODEV;
8668                 goto err_out_disable;
8669         }
8670
8671         if (atomic_read(&pdev->enable_cnt) == 1) {
8672                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8673                 if (rc) {
8674                         dev_err(&bp->pdev->dev,
8675                                 "Cannot obtain PCI resources, aborting\n");
8676                         goto err_out_disable;
8677                 }
8678
8679                 pci_set_master(pdev);
8680                 pci_save_state(pdev);
8681         }
8682
8683         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8684         if (bp->pm_cap == 0) {
8685                 dev_err(&bp->pdev->dev,
8686                         "Cannot find power management capability, aborting\n");
8687                 rc = -EIO;
8688                 goto err_out_release;
8689         }
8690
8691         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8692         if (bp->pcie_cap == 0) {
8693                 dev_err(&bp->pdev->dev,
8694                         "Cannot find PCI Express capability, aborting\n");
8695                 rc = -EIO;
8696                 goto err_out_release;
8697         }
8698
8699         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
8700                 bp->flags |= USING_DAC_FLAG;
8701                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
8702                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8703                                " failed, aborting\n");
8704                         rc = -EIO;
8705                         goto err_out_release;
8706                 }
8707
8708         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
8709                 dev_err(&bp->pdev->dev,
8710                         "System does not support DMA, aborting\n");
8711                 rc = -EIO;
8712                 goto err_out_release;
8713         }
8714
8715         dev->mem_start = pci_resource_start(pdev, 0);
8716         dev->base_addr = dev->mem_start;
8717         dev->mem_end = pci_resource_end(pdev, 0);
8718
8719         dev->irq = pdev->irq;
8720
8721         bp->regview = pci_ioremap_bar(pdev, 0);
8722         if (!bp->regview) {
8723                 dev_err(&bp->pdev->dev,
8724                         "Cannot map register space, aborting\n");
8725                 rc = -ENOMEM;
8726                 goto err_out_release;
8727         }
8728
8729         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8730                                         min_t(u64, BNX2X_DB_SIZE(bp),
8731                                               pci_resource_len(pdev, 2)));
8732         if (!bp->doorbells) {
8733                 dev_err(&bp->pdev->dev,
8734                         "Cannot map doorbell space, aborting\n");
8735                 rc = -ENOMEM;
8736                 goto err_out_unmap;
8737         }
8738
8739         bnx2x_set_power_state(bp, PCI_D0);
8740
8741         /* clean indirect addresses */
8742         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8743                                PCICFG_VENDOR_ID_OFFSET);
8744         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8745         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8746         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8747         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
8748
8749         /* Reset the load counter */
8750         bnx2x_clear_load_cnt(bp);
8751
8752         dev->watchdog_timeo = TX_TIMEOUT;
8753
8754         dev->netdev_ops = &bnx2x_netdev_ops;
8755         bnx2x_set_ethtool_ops(dev);
8756         dev->features |= NETIF_F_SG;
8757         dev->features |= NETIF_F_HW_CSUM;
8758         if (bp->flags & USING_DAC_FLAG)
8759                 dev->features |= NETIF_F_HIGHDMA;
8760         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8761         dev->features |= NETIF_F_TSO6;
8762         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8763
8764         dev->vlan_features |= NETIF_F_SG;
8765         dev->vlan_features |= NETIF_F_HW_CSUM;
8766         if (bp->flags & USING_DAC_FLAG)
8767                 dev->vlan_features |= NETIF_F_HIGHDMA;
8768         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8769         dev->vlan_features |= NETIF_F_TSO6;
8770
8771         /* get_port_hwinfo() will set prtad and mmds properly */
8772         bp->mdio.prtad = MDIO_PRTAD_NONE;
8773         bp->mdio.mmds = 0;
8774         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8775         bp->mdio.dev = dev;
8776         bp->mdio.mdio_read = bnx2x_mdio_read;
8777         bp->mdio.mdio_write = bnx2x_mdio_write;
8778
8779         return 0;
8780
8781 err_out_unmap:
8782         if (bp->regview) {
8783                 iounmap(bp->regview);
8784                 bp->regview = NULL;
8785         }
8786         if (bp->doorbells) {
8787                 iounmap(bp->doorbells);
8788                 bp->doorbells = NULL;
8789         }
8790
8791 err_out_release:
8792         if (atomic_read(&pdev->enable_cnt) == 1)
8793                 pci_release_regions(pdev);
8794
8795 err_out_disable:
8796         pci_disable_device(pdev);
8797         pci_set_drvdata(pdev, NULL);
8798
8799 err_out:
8800         return rc;
8801 }
8802
8803 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8804                                                  int *width, int *speed)
8805 {
8806         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8807
8808         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8809
8810         /* return value of 1=2.5GHz 2=5GHz */
8811         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
8812 }
8813
8814 static int bnx2x_check_firmware(struct bnx2x *bp)
8815 {
8816         const struct firmware *firmware = bp->firmware;
8817         struct bnx2x_fw_file_hdr *fw_hdr;
8818         struct bnx2x_fw_file_section *sections;
8819         u32 offset, len, num_ops;
8820         u16 *ops_offsets;
8821         int i;
8822         const u8 *fw_ver;
8823
8824         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8825                 return -EINVAL;
8826
8827         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8828         sections = (struct bnx2x_fw_file_section *)fw_hdr;
8829
8830         /* Make sure none of the offsets and sizes make us read beyond
8831          * the end of the firmware data */
8832         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8833                 offset = be32_to_cpu(sections[i].offset);
8834                 len = be32_to_cpu(sections[i].len);
8835                 if (offset + len > firmware->size) {
8836                         dev_err(&bp->pdev->dev,
8837                                 "Section %d length is out of bounds\n", i);
8838                         return -EINVAL;
8839                 }
8840         }
8841
8842         /* Likewise for the init_ops offsets */
8843         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8844         ops_offsets = (u16 *)(firmware->data + offset);
8845         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8846
8847         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8848                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
8849                         dev_err(&bp->pdev->dev,
8850                                 "Section offset %d is out of bounds\n", i);
8851                         return -EINVAL;
8852                 }
8853         }
8854
8855         /* Check FW version */
8856         offset = be32_to_cpu(fw_hdr->fw_version.offset);
8857         fw_ver = firmware->data + offset;
8858         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8859             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8860             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8861             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
8862                 dev_err(&bp->pdev->dev,
8863                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8864                        fw_ver[0], fw_ver[1], fw_ver[2],
8865                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8866                        BCM_5710_FW_MINOR_VERSION,
8867                        BCM_5710_FW_REVISION_VERSION,
8868                        BCM_5710_FW_ENGINEERING_VERSION);
8869                 return -EINVAL;
8870         }
8871
8872         return 0;
8873 }
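
/*
 * The firmware file is a bnx2x_fw_file_hdr (an array of {offset, len}
 * section descriptors) followed by the section payloads; the checks
 * above bounds-check every descriptor and every init_ops offset against
 * firmware->size before anything is dereferenced, then verify the
 * embedded version against the version this driver was built for.
 */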
8874
8875 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8876 {
8877         const __be32 *source = (const __be32 *)_source;
8878         u32 *target = (u32 *)_target;
8879         u32 i;
8880
8881         for (i = 0; i < n/4; i++)
8882                 target[i] = be32_to_cpu(source[i]);
8883 }
8884
8885 /*
8886    Ops array is stored in the following format:
8887    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8888  */
8889 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
8890 {
8891         const __be32 *source = (const __be32 *)_source;
8892         struct raw_op *target = (struct raw_op *)_target;
8893         u32 i, j, tmp;
8894
8895         for (i = 0, j = 0; i < n/8; i++, j += 2) {
8896                 tmp = be32_to_cpu(source[j]);
8897                 target[i].op = (tmp >> 24) & 0xff;
8898                 target[i].offset = tmp & 0xffffff;
8899                 target[i].raw_data = be32_to_cpu(source[j + 1]);
8900         }
8901 }
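
/*
 * Worked example (illustrative values): the big-endian word pair
 * { 0x05000010, 0xdeadbeef } unpacks to op = 0x05, offset = 0x000010,
 * raw_data = 0xdeadbeef.
 */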
8902
8903 /**
8904  * IRO array is stored in the following format:
8905  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8906  */
8907 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8908 {
8909         const __be32 *source = (const __be32 *)_source;
8910         struct iro *target = (struct iro *)_target;
8911         u32 i, j, tmp;
8912
8913         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8914                 target[i].base = be32_to_cpu(source[j]);
8915                 j++;
8916                 tmp = be32_to_cpu(source[j]);
8917                 target[i].m1 = (tmp >> 16) & 0xffff;
8918                 target[i].m2 = tmp & 0xffff;
8919                 j++;
8920                 tmp = be32_to_cpu(source[j]);
8921                 target[i].m3 = (tmp >> 16) & 0xffff;
8922                 target[i].size = tmp & 0xffff;
8923                 j++;
8924         }
8925 }
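
/*
 * Worked example (illustrative values): one IRO entry consumes three
 * big-endian words, e.g. { 0x00002000, 0x00400008, 0x00100040 } decodes
 * to base = 0x2000, m1 = 0x0040, m2 = 0x0008, m3 = 0x0010,
 * size = 0x0040.
 */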
8926
8927 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8928 {
8929         const __be16 *source = (const __be16 *)_source;
8930         u16 *target = (u16 *)_target;
8931         u32 i;
8932
8933         for (i = 0; i < n/2; i++)
8934                 target[i] = be16_to_cpu(source[i]);
8935 }
8936
8937 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
8938 do {                                                                    \
8939         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
8940         bp->arr = kmalloc(len, GFP_KERNEL);                             \
8941         if (!bp->arr) {                                                 \
8942                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8943                 goto lbl;                                               \
8944         }                                                               \
8945         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
8946              (u8 *)bp->arr, len);                                       \
8947 } while (0)
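
/*
 * Usage sketch: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) allocates bp->init_data with the length recorded in
 * fw_hdr->init_data.len, byte-swaps that section out of
 * bp->firmware->data into it, and jumps to request_firmware_exit if
 * the allocation fails.
 */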
8948
8949 int bnx2x_init_firmware(struct bnx2x *bp)
8950 {
8951         const char *fw_file_name;
8952         struct bnx2x_fw_file_hdr *fw_hdr;
8953         int rc;
8954
8955         if (CHIP_IS_E1(bp))
8956                 fw_file_name = FW_FILE_NAME_E1;
8957         else if (CHIP_IS_E1H(bp))
8958                 fw_file_name = FW_FILE_NAME_E1H;
8959         else if (CHIP_IS_E2(bp))
8960                 fw_file_name = FW_FILE_NAME_E2;
8961         else {
8962                 BNX2X_ERR("Unsupported chip revision\n");
8963                 return -EINVAL;
8964         }
8965
8966         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
8967
8968         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
8969         if (rc) {
8970                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
8971                 goto request_firmware_exit;
8972         }
8973
8974         rc = bnx2x_check_firmware(bp);
8975         if (rc) {
8976                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
8977                 goto request_firmware_exit;
8978         }
8979
8980         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8981
8982         /* Initialize the pointers to the init arrays */
8983         /* Blob */
8984         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8985
8986         /* Opcodes */
8987         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8988
8989         /* Offsets */
8990         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8991                             be16_to_cpu_n);
8992
8993         /* STORMs firmware */
8994         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8995                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8996         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
8997                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8998         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8999                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9000         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
9001                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
9002         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9003                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9004         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
9005                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9006         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9007                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9008         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
9009                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
9010         /* IRO */
9011         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
9012
9013         return 0;
9014
9015 iro_alloc_err:
9016         kfree(bp->init_ops_offsets);
9017 init_offsets_alloc_err:
9018         kfree(bp->init_ops);
9019 init_ops_alloc_err:
9020         kfree(bp->init_data);
9021 request_firmware_exit:
9022         release_firmware(bp->firmware);
9023
9024         return rc;
9025 }
9026
9027 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9028 {
9029         int cid_count = L2_FP_COUNT(l2_cid_count);
9030
9031 #ifdef BCM_CNIC
9032         cid_count += CNIC_CID_MAX;
9033 #endif
9034         return roundup(cid_count, QM_CID_ROUND);
9035 }
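
/*
 * E.g. (hypothetical numbers): with L2_FP_COUNT(l2_cid_count) == 18,
 * CNIC_CID_MAX == 256 and QM_CID_ROUND == 1024, this returns
 * roundup(274, 1024) == 1024.
 */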
9036
9037 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9038                                     const struct pci_device_id *ent)
9039 {
9040         struct net_device *dev = NULL;
9041         struct bnx2x *bp;
9042         int pcie_width, pcie_speed;
9043         int rc, cid_count;
9044
9045         switch (ent->driver_data) {
9046         case BCM57710:
9047         case BCM57711:
9048         case BCM57711E:
9049                 cid_count = FP_SB_MAX_E1x;
9050                 break;
9051
9052         case BCM57712:
9053         case BCM57712E:
9054                 cid_count = FP_SB_MAX_E2;
9055                 break;
9056
9057         default:
9058                 pr_err("Unknown board_type (%ld), aborting\n",
9059                        ent->driver_data);
9060                 return -ENODEV;
9061         }
9062
9063         cid_count += CNIC_CONTEXT_USE;
9064
9065         /* dev zeroed in init_etherdev */
9066         dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9067         if (!dev) {
9068                 dev_err(&pdev->dev, "Cannot allocate net device\n");
9069                 return -ENOMEM;
9070         }
9071
9072         bp = netdev_priv(dev);
9073         bp->msg_enable = debug;
9074
9075         pci_set_drvdata(pdev, dev);
9076
9077         bp->l2_cid_count = cid_count;
9078
9079         rc = bnx2x_init_dev(pdev, dev);
9080         if (rc < 0) {
9081                 free_netdev(dev);
9082                 return rc;
9083         }
9084
9085         rc = bnx2x_init_bp(bp);
9086         if (rc)
9087                 goto init_one_exit;
9088
9089         /* calc qm_cid_count */
9090         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9091
9092         rc = register_netdev(dev);
9093         if (rc) {
9094                 dev_err(&pdev->dev, "Cannot register net device\n");
9095                 goto init_one_exit;
9096         }
9097
9098         /* Configure interrupt mode: try to enable MSI-X/MSI if
9099          * needed, and set bp->num_queues appropriately.
9100          */
9101         bnx2x_set_int_mode(bp);
9102
9103         /* Add all NAPI objects */
9104         bnx2x_add_all_napi(bp);
9105
9106         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9107
9108         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9109                " IRQ %d, ", board_info[ent->driver_data].name,
9110                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9111                pcie_width,
9112                ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9113                  (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9114                                                 "5GHz (Gen2)" : "2.5GHz",
9115                dev->base_addr, bp->pdev->irq);
9116         pr_cont("node addr %pM\n", dev->dev_addr);
9117
9118         return 0;
9119
9120 init_one_exit:
9121         if (bp->regview)
9122                 iounmap(bp->regview);
9123
9124         if (bp->doorbells)
9125                 iounmap(bp->doorbells);
9126
9127         free_netdev(dev);
9128
9129         if (atomic_read(&pdev->enable_cnt) == 1)
9130                 pci_release_regions(pdev);
9131
9132         pci_disable_device(pdev);
9133         pci_set_drvdata(pdev, NULL);
9134
9135         return rc;
9136 }
9137
9138 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9139 {
9140         struct net_device *dev = pci_get_drvdata(pdev);
9141         struct bnx2x *bp;
9142
9143         if (!dev) {
9144                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9145                 return;
9146         }
9147         bp = netdev_priv(dev);
9148
9149         unregister_netdev(dev);
9150
9151         /* Delete all NAPI objects */
9152         bnx2x_del_all_napi(bp);
9153
9154         /* Disable MSI/MSI-X */
9155         bnx2x_disable_msi(bp);
9156
9157         /* Make sure RESET task is not scheduled before continuing */
9158         cancel_delayed_work_sync(&bp->reset_task);
9159
9160         if (bp->regview)
9161                 iounmap(bp->regview);
9162
9163         if (bp->doorbells)
9164                 iounmap(bp->doorbells);
9165
9166         bnx2x_free_mem_bp(bp);
9167
9168         free_netdev(dev);
9169
9170         if (atomic_read(&pdev->enable_cnt) == 1)
9171                 pci_release_regions(pdev);
9172
9173         pci_disable_device(pdev);
9174         pci_set_drvdata(pdev, NULL);
9175 }
9176
9177 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9178 {
9179         int i;
9180
9181         bp->state = BNX2X_STATE_ERROR;
9182
9183         bp->rx_mode = BNX2X_RX_MODE_NONE;
9184
9185         bnx2x_netif_stop(bp, 0);
9186         netif_carrier_off(bp->dev);
9187
9188         del_timer_sync(&bp->timer);
9189         bp->stats_state = STATS_STATE_DISABLED;
9190         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9191
9192         /* Release IRQs */
9193         bnx2x_free_irq(bp);
9194
9195         /* Free SKBs, SGEs, TPA pool and driver internals */
9196         bnx2x_free_skbs(bp);
9197
9198         for_each_queue(bp, i)
9199                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9200
9201         bnx2x_free_mem(bp);
9202
9203         bp->state = BNX2X_STATE_CLOSED;
9204
9205         return 0;
9206 }
9207
9208 static void bnx2x_eeh_recover(struct bnx2x *bp)
9209 {
9210         u32 val;
9211
9212         mutex_init(&bp->port.phy_mutex);
9213
9214         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9215         bp->link_params.shmem_base = bp->common.shmem_base;
9216         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9217
9218         if (!bp->common.shmem_base ||
9219             (bp->common.shmem_base < 0xA0000) ||
9220             (bp->common.shmem_base >= 0xC0000)) {
9221                 BNX2X_DEV_INFO("MCP not active\n");
9222                 bp->flags |= NO_MCP_FLAG;
9223                 return;
9224         }
9225
9226         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9227         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9228                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9229                 BNX2X_ERR("BAD MCP validity signature\n");
9230
9231         if (!BP_NOMCP(bp)) {
9232                 bp->fw_seq =
9233                     (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9234                     DRV_MSG_SEQ_NUMBER_MASK);
9235                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9236         }
9237 }
9238
9239 /**
9240  * bnx2x_io_error_detected - called when PCI error is detected
9241  * @pdev: Pointer to PCI device
9242  * @state: The current pci connection state
9243  *
9244  * This function is called after a PCI bus error affecting
9245  * this device has been detected.
9246  */
9247 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9248                                                 pci_channel_state_t state)
9249 {
9250         struct net_device *dev = pci_get_drvdata(pdev);
9251         struct bnx2x *bp = netdev_priv(dev);
9252
9253         rtnl_lock();
9254
9255         netif_device_detach(dev);
9256
9257         if (state == pci_channel_io_perm_failure) {
9258                 rtnl_unlock();
9259                 return PCI_ERS_RESULT_DISCONNECT;
9260         }
9261
9262         if (netif_running(dev))
9263                 bnx2x_eeh_nic_unload(bp);
9264
9265         pci_disable_device(pdev);
9266
9267         rtnl_unlock();
9268
9269         /* Request a slot reset */
9270         return PCI_ERS_RESULT_NEED_RESET;
9271 }
9272
9273 /**
9274  * bnx2x_io_slot_reset - called after the PCI bus has been reset
9275  * @pdev: Pointer to PCI device
9276  *
9277  * Restart the card from scratch, as if from a cold-boot.
9278  */
9279 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9280 {
9281         struct net_device *dev = pci_get_drvdata(pdev);
9282         struct bnx2x *bp = netdev_priv(dev);
9283
9284         rtnl_lock();
9285
9286         if (pci_enable_device(pdev)) {
9287                 dev_err(&pdev->dev,
9288                         "Cannot re-enable PCI device after reset\n");
9289                 rtnl_unlock();
9290                 return PCI_ERS_RESULT_DISCONNECT;
9291         }
9292
9293         pci_set_master(pdev);
9294         pci_restore_state(pdev);
9295
9296         if (netif_running(dev))
9297                 bnx2x_set_power_state(bp, PCI_D0);
9298
9299         rtnl_unlock();
9300
9301         return PCI_ERS_RESULT_RECOVERED;
9302 }
9303
9304 /**
9305  * bnx2x_io_resume - called when traffic can start flowing again
9306  * @pdev: Pointer to PCI device
9307  *
9308  * This callback is called when the error recovery driver tells us that
9309  * it's OK to resume normal operation.
9310  */
9311 static void bnx2x_io_resume(struct pci_dev *pdev)
9312 {
9313         struct net_device *dev = pci_get_drvdata(pdev);
9314         struct bnx2x *bp = netdev_priv(dev);
9315
9316         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9317                 printk(KERN_ERR "Handling parity error recovery. "
9318                                 "Try again later\n");
9319                 return;
9320         }
9321
9322         rtnl_lock();
9323
9324         bnx2x_eeh_recover(bp);
9325
9326         if (netif_running(dev))
9327                 bnx2x_nic_load(bp, LOAD_NORMAL);
9328
9329         netif_device_attach(dev);
9330
9331         rtnl_unlock();
9332 }
9333
9334 static struct pci_error_handlers bnx2x_err_handler = {
9335         .error_detected = bnx2x_io_error_detected,
9336         .slot_reset     = bnx2x_io_slot_reset,
9337         .resume         = bnx2x_io_resume,
9338 };
9339
9340 static struct pci_driver bnx2x_pci_driver = {
9341         .name        = DRV_MODULE_NAME,
9342         .id_table    = bnx2x_pci_tbl,
9343         .probe       = bnx2x_init_one,
9344         .remove      = __devexit_p(bnx2x_remove_one),
9345         .suspend     = bnx2x_suspend,
9346         .resume      = bnx2x_resume,
9347         .err_handler = &bnx2x_err_handler,
9348 };
9349
9350 static int __init bnx2x_init(void)
9351 {
9352         int ret;
9353
9354         pr_info("%s", version);
9355
9356         bnx2x_wq = create_singlethread_workqueue("bnx2x");
9357         if (bnx2x_wq == NULL) {
9358                 pr_err("Cannot create workqueue\n");
9359                 return -ENOMEM;
9360         }
9361
9362         ret = pci_register_driver(&bnx2x_pci_driver);
9363         if (ret) {
9364                 pr_err("Cannot register driver\n");
9365                 destroy_workqueue(bnx2x_wq);
9366         }
9367         return ret;
9368 }
9369
9370 static void __exit bnx2x_cleanup(void)
9371 {
9372         pci_unregister_driver(&bnx2x_pci_driver);
9373
9374         destroy_workqueue(bnx2x_wq);
9375 }
9376
9377 module_init(bnx2x_init);
9378 module_exit(bnx2x_cleanup);
9379
9380 #ifdef BCM_CNIC
9381
9382 /* count denotes the number of new completions we have seen */
9383 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9384 {
9385         struct eth_spe *spe;
9386
9387 #ifdef BNX2X_STOP_ON_ERROR
9388         if (unlikely(bp->panic))
9389                 return;
9390 #endif
9391
9392         spin_lock_bh(&bp->spq_lock);
9393         BUG_ON(bp->cnic_spq_pending < count);
9394         bp->cnic_spq_pending -= count;
9395
9396
9397         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9398                 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9399                                 & SPE_HDR_CONN_TYPE) >>
9400                                 SPE_HDR_CONN_TYPE_SHIFT;
9401
9402                 /* Set validation for iSCSI L2 client before sending SETUP
9403                  * ramrod
9404                  */
9405                 if (type == ETH_CONNECTION_TYPE) {
9406                         u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9407                                              hdr.conn_and_cmd_data) >>
9408                                 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9409
9410                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9411                                 bnx2x_set_ctx_validation(&bp->context.
9412                                                 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9413                                         HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9414                 }
9415
9416                 /* There may be no more than 8 L2 and COMMON SPEs and no
9417                  * more than 8 L5 SPEs in flight.
9418                  */
9419                 if ((type == NONE_CONNECTION_TYPE) ||
9420                     (type == ETH_CONNECTION_TYPE)) {
9421                         if (!atomic_read(&bp->spq_left))
9422                                 break;
9423                         else
9424                                 atomic_dec(&bp->spq_left);
9425                 } else if (type == ISCSI_CONNECTION_TYPE) {
9426                         if (bp->cnic_spq_pending >=
9427                             bp->cnic_eth_dev.max_kwqe_pending)
9428                                 break;
9429                         else
9430                                 bp->cnic_spq_pending++;
9431                 } else {
9432                         BNX2X_ERR("Unknown SPE type: %d\n", type);
9433                         bnx2x_panic();
9434                         break;
9435                 }
9436
9437                 spe = bnx2x_sp_get_next(bp);
9438                 *spe = *bp->cnic_kwq_cons;
9439
9440                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9441                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9442
9443                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9444                         bp->cnic_kwq_cons = bp->cnic_kwq;
9445                 else
9446                         bp->cnic_kwq_cons++;
9447         }
9448         bnx2x_sp_prod_update(bp);
9449         spin_unlock_bh(&bp->spq_lock);
9450 }
9451
9452 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9453                                struct kwqe_16 *kwqes[], u32 count)
9454 {
9455         struct bnx2x *bp = netdev_priv(dev);
9456         int i;
9457
9458 #ifdef BNX2X_STOP_ON_ERROR
9459         if (unlikely(bp->panic))
9460                 return -EIO;
9461 #endif
9462
9463         spin_lock_bh(&bp->spq_lock);
9464
9465         for (i = 0; i < count; i++) {
9466                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9467
9468                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9469                         break;
9470
9471                 *bp->cnic_kwq_prod = *spe;
9472
9473                 bp->cnic_kwq_pending++;
9474
9475                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9476                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
9477                    spe->data.update_data_addr.hi,
9478                    spe->data.update_data_addr.lo,
9479                    bp->cnic_kwq_pending);
9480
9481                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9482                         bp->cnic_kwq_prod = bp->cnic_kwq;
9483                 else
9484                         bp->cnic_kwq_prod++;
9485         }
9486
9487         spin_unlock_bh(&bp->spq_lock);
9488
9489         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9490                 bnx2x_cnic_sp_post(bp, 0);
9491
9492         return i;
9493 }
9494
9495 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9496 {
9497         struct cnic_ops *c_ops;
9498         int rc = 0;
9499
9500         mutex_lock(&bp->cnic_mutex);
9501         c_ops = bp->cnic_ops;
9502         if (c_ops)
9503                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9504         mutex_unlock(&bp->cnic_mutex);
9505
9506         return rc;
9507 }
9508
9509 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9510 {
9511         struct cnic_ops *c_ops;
9512         int rc = 0;
9513
9514         rcu_read_lock();
9515         c_ops = rcu_dereference(bp->cnic_ops);
9516         if (c_ops)
9517                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9518         rcu_read_unlock();
9519
9520         return rc;
9521 }
9522
9523 /*
9524  * for commands that have no data
9525  */
9526 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9527 {
9528         struct cnic_ctl_info ctl = {0};
9529
9530         ctl.cmd = cmd;
9531
9532         return bnx2x_cnic_ctl_send(bp, &ctl);
9533 }
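
/*
 * Caller sketch: bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD) sends a bare
 * command with no payload to the registered CNIC driver, if any.
 */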
9534
9535 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9536 {
9537         struct cnic_ctl_info ctl;
9538
9539         /* first we tell CNIC and only then we count this as a completion */
9540         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9541         ctl.data.comp.cid = cid;
9542
9543         bnx2x_cnic_ctl_send_bh(bp, &ctl);
9544         bnx2x_cnic_sp_post(bp, 0);
9545 }
9546
9547 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9548 {
9549         struct bnx2x *bp = netdev_priv(dev);
9550         int rc = 0;
9551
9552         switch (ctl->cmd) {
9553         case DRV_CTL_CTXTBL_WR_CMD: {
9554                 u32 index = ctl->data.io.offset;
9555                 dma_addr_t addr = ctl->data.io.dma_addr;
9556
9557                 bnx2x_ilt_wr(bp, index, addr);
9558                 break;
9559         }
9560
9561         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9562                 int count = ctl->data.credit.credit_count;
9563
9564                 bnx2x_cnic_sp_post(bp, count);
9565                 break;
9566         }
9567
9568         /* rtnl_lock is held.  */
9569         case DRV_CTL_START_L2_CMD: {
9570                 u32 cli = ctl->data.ring.client_id;
9571
9572                 /* Set iSCSI MAC address */
9573                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9574
9575                 mmiowb();
9576                 barrier();
9577
9578                 /* Start accepting on the iSCSI L2 ring. Accept all
9579                  * multicasts, because that is the only way for the UIO
9580                  * client to receive them (in non-promiscuous mode only
9581                  * one client per function, the leading one in our case,
9582                  * receives multicast packets).
9583                  */
9584                 bnx2x_rxq_set_mac_filters(bp, cli,
9585                         BNX2X_ACCEPT_UNICAST |
9586                         BNX2X_ACCEPT_BROADCAST |
9587                         BNX2X_ACCEPT_ALL_MULTICAST);
9588                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9589
9590                 break;
9591         }
9592
9593         /* rtnl_lock is held.  */
9594         case DRV_CTL_STOP_L2_CMD: {
9595                 u32 cli = ctl->data.ring.client_id;
9596
9597                 /* Stop accepting on iSCSI L2 ring */
9598                 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9599                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9600
9601                 mmiowb();
9602                 barrier();
9603
9604                 /* Unset iSCSI L2 MAC */
9605                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9606                 break;
9607         }
9608         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9609                 int count = ctl->data.credit.credit_count;
9610
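                /* Return L2 SPQ credit.  The barriers make the
                 * atomic_add() fully ordered, so the updated credit
                 * is seen before any subsequent check of spq_left on
                 * the posting path (bnx2x_sp_post(), as we read it).
                 */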
9611                 smp_mb__before_atomic_inc();
9612                 atomic_add(count, &bp->spq_left);
9613                 smp_mb__after_atomic_inc();
9614                 break;
9615         }
9616
9617         default:
9618                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9619                 rc = -EINVAL;
9620         }
9621
9622         return rc;
9623 }
9624
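/* Describe our interrupt resources to CNIC.  Under MSI-X, entry 1
 * of the MSI-X table is handed over here for CNIC's status block;
 * the default status block is always exported as the second entry.
 */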
9625 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
9626 {
9627         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9628
9629         if (bp->flags & USING_MSIX_FLAG) {
9630                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9631                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9632                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9633         } else {
9634                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9635                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9636         }
9637         if (CHIP_IS_E2(bp))
9638                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9639         else
9640                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9641
9642         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
9643         cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
9644         cp->irq_arr[1].status_blk = bp->def_status_blk;
9645         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
9646         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
9647
9648         cp->num_irq = 2;
9649 }
9650
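/* Attach CNIC to this device: allocate the kernel work-queue
 * staging ring, export the IRQ info, and only then publish
 * cnic_ops with rcu_assign_pointer() so readers see a fully
 * initialized state.
 */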
9651 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9652                                void *data)
9653 {
9654         struct bnx2x *bp = netdev_priv(dev);
9655         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9656
9657         if (ops == NULL)
9658                 return -EINVAL;
9659
9660         if (atomic_read(&bp->intr_sem) != 0)
9661                 return -EBUSY;
9662
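        /* One page of eth_spe entries serves as a circular staging
         * ring for CNIC kernel work-queue elements; the prod/cons
         * pointers below walk it and wrap at cnic_kwq_last.
         */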
9663         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9664         if (!bp->cnic_kwq)
9665                 return -ENOMEM;
9666
9667         bp->cnic_kwq_cons = bp->cnic_kwq;
9668         bp->cnic_kwq_prod = bp->cnic_kwq;
9669         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9670
9671         bp->cnic_spq_pending = 0;
9672         bp->cnic_kwq_pending = 0;
9673
9674         bp->cnic_data = data;
9675
9676         cp->num_irq = 0;
9677         cp->drv_state = CNIC_DRV_STATE_REGD;
9678         cp->iro_arr = bp->iro_arr;
9679
9680         bnx2x_setup_cnic_irq_info(bp);
9681
9682         rcu_assign_pointer(bp->cnic_ops, ops);
9683
9684         return 0;
9685 }
9686
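/* Detach CNIC.  Clearing cnic_ops under the mutex and then waiting
 * in synchronize_rcu() guarantees no bnx2x_cnic_ctl_send_bh()
 * reader still holds the old ops when the staging ring is freed.
 */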
9687 static int bnx2x_unregister_cnic(struct net_device *dev)
9688 {
9689         struct bnx2x *bp = netdev_priv(dev);
9690         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9691
9692         mutex_lock(&bp->cnic_mutex);
9693         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9694                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9695                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9696         }
9697         cp->drv_state = 0;
9698         rcu_assign_pointer(bp->cnic_ops, NULL);
9699         mutex_unlock(&bp->cnic_mutex);
9700         synchronize_rcu();
9701         kfree(bp->cnic_kwq);
9702         bp->cnic_kwq = NULL;
9703
9704         return 0;
9705 }
9706
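/* Export the per-device cnic_eth_dev that CNIC drives us through:
 * chip id, BAR mappings, ILT context-table layout and the callbacks
 * registered above.
 */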
9707 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9708 {
9709         struct bnx2x *bp = netdev_priv(dev);
9710         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9711
9712         cp->drv_owner = THIS_MODULE;
9713         cp->chip_id = CHIP_ID(bp);
9714         cp->pdev = bp->pdev;
9715         cp->io_base = bp->regview;
9716         cp->io_base2 = bp->doorbells;
9717         cp->max_kwqe_pending = 8;
9718         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
9719         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9720                              bnx2x_cid_ilt_lines(bp);
9721         cp->ctx_tbl_len = CNIC_ILT_LINES;
9722         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
9723         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9724         cp->drv_ctl = bnx2x_drv_ctl;
9725         cp->drv_register_cnic = bnx2x_register_cnic;
9726         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
9727         cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9728         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9729
9730         DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9731                          "starting cid %d\n",
9732            cp->ctx_blk_size,
9733            cp->ctx_tbl_offset,
9734            cp->ctx_tbl_len,
9735            cp->starting_cid);
9736         return cp;
9737 }
9738 EXPORT_SYMBOL(bnx2x_cnic_probe);
9739
9740 #endif /* BCM_CNIC */
9741