/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

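/*
 * Note: FW_FILE_VERSION expands to the dotted version encoded in the
 * BCM_5710_FW_* macros from bnx2x_fw_file_hdr.h, so a request for the
 * E1 firmware resolves to a name of the form
 *
 *	bnx2x/bnx2x-e1-<major>.<minor>.<revision>.<engineering>.fw
 *
 * (the concrete numbers depend on the headers this file is built
 * against). request_firmware() then looks this path up under the
 * distribution's firmware directory.
 */
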
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
                   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
        BCM57712 = 3,
        BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" },
        { "Broadcom NetXtreme II BCM57712 XGb" },
        { "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E        0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp,  addr, U64_LO(mapping));
        REG_WR(bp,  addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
                                       u32 addr, size_t size, u32 val)
{
        int i;
        for (i = 0; i < size/4; i++)
                REG_WR(bp,  addr + (i * 4), val);
}

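/*
 * The two helpers above write into STORM internal memory through the
 * GRC window: __storm_memset_dma_mapping() splits a 64-bit DMA address
 * into the low/high 32-bit halves the firmware expects, and
 * __storm_memset_fill() stores `size` bytes (rounded down to whole
 * 32-bit words) of `val`. A hypothetical caller zeroing a firmware
 * structure would therefore look like
 *
 *	__storm_memset_fill(bp, addr, sizeof(struct foo), 0);
 *
 * which is exactly the pattern the storm_memset_*_zero() wrappers
 * below follow.
 */
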
static inline void storm_memset_ustats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct ustorm_per_client_stats);

        u32 addr = BAR_USTRORM_INTMEM +
                        USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct tstorm_per_client_stats);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
                                            u8 port, u16 stat_id)
{
        size_t size = sizeof(struct xstorm_per_client_stats);

        u32 addr = BAR_XSTRORM_INTMEM +
                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

        __storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
                                         dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
        REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
                                struct tstorm_eth_function_common_config *tcfg,
                                u16 abs_fid)
{
        size_t size = sizeof(struct tstorm_eth_function_common_config);

        u32 addr = BAR_TSTRORM_INTMEM +
                        TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
                                struct stats_indication_flags *flags,
                                u16 abs_fid)
{
        size_t size = sizeof(struct stats_indication_flags);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

        __storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_XSTRORM_INTMEM +
                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_TSTRORM_INTMEM +
                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_USTRORM_INTMEM +
                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
                                           dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = BAR_CSTRORM_INTMEM +
                CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                        u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
                                struct event_ring_data *eq_data,
                                u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                        u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
        REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 ticks)
{

        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, timeout);
        REG_WR8(bp, addr, ticks);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
                          port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
                                             u16 fw_sb_id, u8 sb_index,
                                             u8 disable)
{
        u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
        int index_offset = CHIP_IS_E2(bp) ?
                offsetof(struct hc_status_block_data_e2, index_data) :
                offsetof(struct hc_status_block_data_e1x, index_data);
        u32 addr = BAR_CSTRORM_INTMEM +
                        CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
                        index_offset +
                        sizeof(struct hc_index_data)*sb_index +
                        offsetof(struct hc_index_data, flags);
        u16 flags = REG_RD16(bp, addr);
        /* clear and set */
        flags &= ~HC_INDEX_DATA_HC_ENABLED;
        flags |= enable_flag;
        REG_WR16(bp, addr, flags);
        DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
                          port, fw_sb_id, sb_index, disable);
}

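/*
 * Both helpers above poke a single field inside the per-SB
 * hc_status_block_data structure that lives in CSTORM internal memory.
 * The byte address is assembled as
 *
 *	base of the SB data block for fw_sb_id
 *	+ offset of the index_data[] array (E2 vs. E1x layout)
 *	+ sb_index * sizeof(struct hc_index_data)
 *	+ offset of the field being changed (timeout or flags)
 *
 * so only the addressed index is touched and the rest of the status
 * block data is left intact.
 */
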
/* used only at init
 * locking is done by the MCP
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

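/*
 * The pair above implements "indirect" GRC access through PCI config
 * space: the target GRC address is written into the PCICFG_GRC_ADDRESS
 * window, the data moves through PCICFG_GRC_DATA, and the window is
 * parked back on PCICFG_VENDOR_ID_OFFSET so a stray config-space read
 * cannot hit device internals. It is the fallback used while DMAE is
 * unavailable, e.g. (hypothetical call):
 *
 *	bnx2x_reg_wr_ind(bp, reg_offset, 0);
 */
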
#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
                                    "dst_addr [none]\n"
                           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

}

const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                           DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                             bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}

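/*
 * bnx2x_dmae_opcode() packs everything the DMAE block needs into a
 * single 32-bit opcode: source/destination types, per-port and per-VN
 * routing, error policy, host endianness and (optionally) the
 * completion destination. A PCI-to-GRC write opcode with a PCI
 * completion, for example, is obtained with
 *
 *	bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *			  true, DMAE_COMP_PCI);
 *
 * which is precisely what bnx2x_prep_dmae_with_comp() below does.
 */
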
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
        int rc = 0;

        DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        /* lock the dmae channel */
        mutex_lock(&bp->dmae_mutex);

        /* reset completion */
        *wb_comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
        while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*wb_comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

        DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
        mutex_unlock(&bp->dmae_mutex);
        return rc;
}

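/*
 * Completion is detected by polling the wb_comp word that the DMAE
 * engine DMAs back into host memory. The budget above works out to
 * roughly 40 iterations * 50 us = 2 ms on real silicon, and
 * 400000 * 50 us = 20 s on slow emulation platforms, before
 * DMAE_TIMEOUT is declared.
 */
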
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

        /* issue the command and wait for completion */
        bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

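/*
 * Note the unit juggling above: `len` counts 32-bit words (that is
 * what the DMAE length field holds), while both the host address and
 * the GRC address advance in bytes, hence offset += dmae_wr_max * 4.
 * For illustration, assuming DMAE_LEN32_WR_MAX evaluates to 0x400, a
 * 0x500-dword write is split into one 0x400-dword chunk followed by a
 * 0x100-dword remainder starting 0x1000 bytes further on.
 */
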
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

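/*
 * Each STORM processor keeps a small array of assert-list entries in
 * its internal memory; an entry is four consecutive dwords
 * (row0..row3) and an unused slot carries
 * COMMON_ASM_INVALID_ASSERT_OPCODE in row0, which is why each scan
 * above stops at the first invalid entry. The return value is the
 * total number of firmware asserts found across the four STORMs.
 */
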
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;
        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
        addr = trace_shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
                        + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

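/*
 * The MCP appears to keep a cyclic printf trace in its scratchpad,
 * with `mark` indicating the current write position. The two loops
 * above therefore stitch the wrapped buffer back into chronological
 * order: first from `mark` to the end of the trace area, then from
 * the start of the area up to `mark`, eight dwords (one
 * NUL-terminated chunk in data[]) at a time.
 */
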
void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
#endif

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_idx, bp->def_att_idx,
                  bp->attn_state, bp->spq_prod_idx);
        BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                  bp->def_status_blk->atten_status_block.attn_bits,
                  bp->def_status_blk->atten_status_block.attn_bits_ack,
                  bp->def_status_blk->atten_status_block.status_block_id,
                  bp->def_status_blk->atten_status_block.attn_bits_index);
        BNX2X_ERR("     def (");
        for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                pr_cont("0x%x%s",
                       bp->def_status_blk->sp_sb.index_values[i],
                       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

        for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
                *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
                        i*sizeof(u32));

        pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
                         "pf_id(0x%x)  vnic_id(0x%x)  "
                         "vf_id(0x%x)  vf_valid (0x%x)\n",
               sp_sb_data.igu_sb_id,
               sp_sb_data.igu_seg_id,
               sp_sb_data.p_func.pf_id,
               sp_sb_data.p_func.vnic_id,
               sp_sb_data.p_func.vf_id,
               sp_sb_data.p_func.vf_valid);


        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm  *hc_sm_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.common.state_machine :
                        sb_data_e1x.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E2(bp) ?
                        sb_data_e2.index_data :
                        sb_data_e1x.index_data;
                int data_size;
                u32 *sb_data_p;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

                loop = CHIP_IS_E2(bp) ?
                        HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

                /* host sb data */

                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");
                /* fw sb data */
                data_size = CHIP_IS_E2(bp) ?
                        sizeof(struct hc_status_block_data_e2) :
                        sizeof(struct hc_status_block_data_e1x);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E2(bp) ?
                        (u32 *)&sb_data_e2 :
                        (u32 *)&sb_data_e1x;
                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (CHIP_IS_E2(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
                                "vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) "
                               "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
                               "time_to_expire (0x%x) "
                               "timer_value(0x%x)\n", j,
                               hc_sm_p[j].__flags,
                               hc_sm_p[j].igu_sb_id,
                               hc_sm_p[j].igu_seg_id,
                               hc_sm_p[j].time_to_expire,
                               hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) "
                                         "timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR
        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }
#endif
        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (!CHIP_IS_E1(bp)) {
                /* init leading/trailing edge */
                if (IS_MF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

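/*
 * In the INTx branch above the HC config is deliberately written
 * twice: first with the MSI/MSI-X enable bit still set, then again
 * with that bit cleared, leaving the block in pure INTx mode. The
 * leading/trailing edge registers are then programmed per port; in
 * multi-function mode only this function's VN bit (plus NIG/GPIO3
 * attention when this function is the PMF) is unmasked instead of the
 * full 0xffff.
 */
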
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
        u32 val;
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        if (msix) {
                val &= ~(IGU_PF_CONF_INT_LINE_EN |
                         IGU_PF_CONF_SINGLE_ISR_EN);
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        } else {
                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
                val |= (IGU_PF_CONF_FUNC_EN |
                        IGU_PF_CONF_INT_LINE_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        }

        DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

        barrier();

        /* init leading/trailing edge */
        if (IS_MF(bp)) {
                val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                if (bp->port.pmf)
                        /* enable nig and gpio3 attention */
                        val |= 0x1100;
        } else
                val = 0xffff;

        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_enable(bp);
        else
                bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from HC!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

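/*
 * Quiesce order matters here: first the intr_sem gate makes new ISR
 * invocations bail out early, then (optionally) the hardware source
 * is masked, then synchronize_irq() waits for any handler already
 * running on another CPU, and finally the slowpath work item is
 * cancelled and the workqueue flushed. After this returns, no driver
 * interrupt code is executing or will execute.
 */
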
/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}

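/*
 * The MISC driver-control registers implement a per-function
 * test-and-set primitive: writing the resource bit to
 * hw_lock_control_reg + 4 requests the lock, and reading the control
 * register back shows whether this function now owns the bit. Hence
 * the non-blocking "try" above; bnx2x_acquire_hw_lock() further down
 * wraps the same sequence in a 5 s retry loop.
 */
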
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        switch (command | fp->state) {
        case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
                DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
                fp->state = BNX2X_FP_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
                DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
                fp->state = BNX2X_FP_STATE_TERMINATED;
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  "
                          "fp[%d] state is %x\n",
                          command, fp->index, fp->state);
                break;
        }

        smp_mb__before_atomic_inc();
        atomic_inc(&bp->spq_left);
        /* push the change in fp->state out to memory */
        smp_wmb();

        return;
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct bnx2x *bp = netdev_priv(dev_instance);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;
        int i;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
                if (status & mask) {
                        /* Handle Rx and Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                        prefetch(fp->tx_cons_sb);
                        prefetch(&fp->sb_running_index[SM_RX_ID]);
                        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
                }
        }

#ifdef BCM_CNIC
        mask = 0x2;
        if (status & (mask | 0x1)) {
                struct cnic_ops *c_ops = NULL;

                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();

                status &= ~mask;
        }
#endif

        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status))
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        return IRQ_HANDLED;
}

1443 /* end of fast path */
1444
1445
1446 /* Link */
1447
1448 /*
1449  * General service functions
1450  */
1451
1452 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1453 {
1454         u32 lock_status;
1455         u32 resource_bit = (1 << resource);
1456         int func = BP_FUNC(bp);
1457         u32 hw_lock_control_reg;
1458         int cnt;
1459
1460         /* Validating that the resource is within range */
1461         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1462                 DP(NETIF_MSG_HW,
1463                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1464                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1465                 return -EINVAL;
1466         }
1467
1468         if (func <= 5) {
1469                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1470         } else {
1471                 hw_lock_control_reg =
1472                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1473         }
1474
1475         /* Validating that the resource is not already taken */
1476         lock_status = REG_RD(bp, hw_lock_control_reg);
1477         if (lock_status & resource_bit) {
1478                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1479                    lock_status, resource_bit);
1480                 return -EEXIST;
1481         }
1482
1483         /* Try for 5 seconds, every 5ms */
1484         for (cnt = 0; cnt < 1000; cnt++) {
1485                 /* Try to acquire the lock */
1486                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1487                 lock_status = REG_RD(bp, hw_lock_control_reg);
1488                 if (lock_status & resource_bit)
1489                         return 0;
1490
1491                 msleep(5);
1492         }
1493         DP(NETIF_MSG_HW, "Timeout\n");
1494         return -EAGAIN;
1495 }
1496
1497 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1498 {
1499         u32 lock_status;
1500         u32 resource_bit = (1 << resource);
1501         int func = BP_FUNC(bp);
1502         u32 hw_lock_control_reg;
1503
1504         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1505
1506         /* Validating that the resource is within range */
1507         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1508                 DP(NETIF_MSG_HW,
1509                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1510                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1511                 return -EINVAL;
1512         }
1513
1514         if (func <= 5) {
1515                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1516         } else {
1517                 hw_lock_control_reg =
1518                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1519         }
1520
1521         /* Validating that the resource is currently taken */
1522         lock_status = REG_RD(bp, hw_lock_control_reg);
1523         if (!(lock_status & resource_bit)) {
1524                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1525                    lock_status, resource_bit);
1526                 return -EFAULT;
1527         }
1528
1529         REG_WR(bp, hw_lock_control_reg, resource_bit);
1530         return 0;
1531 }
1532
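/*
 * Illustrative usage sketch (not part of the driver): a typical caller
 * brackets its register accesses with the acquire/release pair above and
 * propagates the acquire error codes (-EINVAL, -EEXIST or -EAGAIN). The
 * resource chosen here is one already used in this file; the guarded
 * access itself is a placeholder.
 */
static int __maybe_unused bnx2x_hw_lock_usage_sketch(struct bnx2x *bp)
{
        int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        if (rc)
                return rc;

        /* ... access the hardware resource guarded by the lock ... */

        return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}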
1533
1534 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1535 {
1536         /* The GPIO should be swapped if swap register is set and active */
1537         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1538                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1539         int gpio_shift = gpio_num +
1540                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1541         u32 gpio_mask = (1 << gpio_shift);
1542         u32 gpio_reg;
1543         int value;
1544
1545         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1546                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1547                 return -EINVAL;
1548         }
1549
1550         /* read GPIO value */
1551         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1552
1553         /* get the requested pin value */
1554         if ((gpio_reg & gpio_mask) == gpio_mask)
1555                 value = 1;
1556         else
1557                 value = 0;
1558
1559         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1560
1561         return value;
1562 }
1563
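/*
 * Worked example of the port-swap term above (a sketch, not driver code):
 * when both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE read non-zero,
 * (swap && override) evaluates to 1 and gpio_port = 1 ^ port, i.e. port 0
 * uses the GPIO bank of port 1 and vice versa; if either register is
 * clear, gpio_port = 0 ^ port = port and no swapping occurs.
 */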
1564 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1565 {
1566         /* The GPIO should be swapped if swap register is set and active */
1567         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1568                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1569         int gpio_shift = gpio_num +
1570                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1571         u32 gpio_mask = (1 << gpio_shift);
1572         u32 gpio_reg;
1573
1574         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1575                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1576                 return -EINVAL;
1577         }
1578
1579         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1580         /* read GPIO and mask except the float bits */
1581         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1582
1583         switch (mode) {
1584         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1585                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1586                    gpio_num, gpio_shift);
1587                 /* clear FLOAT and set CLR */
1588                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1589                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1590                 break;
1591
1592         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1593                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1594                    gpio_num, gpio_shift);
1595                 /* clear FLOAT and set SET */
1596                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1597                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1598                 break;
1599
1600         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1601                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1602                    gpio_num, gpio_shift);
1603                 /* set FLOAT */
1604                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1605                 break;
1606
1607         default:
1608                 break;
1609         }
1610
1611         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1612         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1613
1614         return 0;
1615 }
1616
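/*
 * Illustrative usage sketch (not part of the driver): driving a pin with
 * the helper above. The mode and pin constants are ones already referenced
 * in this file; the choice of pin and port is arbitrary.
 */
static void __maybe_unused bnx2x_set_gpio_usage_sketch(struct bnx2x *bp)
{
        /* drive GPIO 3 of port 0 high, then float it again */
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                       MISC_REGISTERS_GPIO_OUTPUT_HIGH, 0);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                       MISC_REGISTERS_GPIO_INPUT_HI_Z, 0);
}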
1617 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1618 {
1619         /* The GPIO should be swapped if swap register is set and active */
1620         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1621                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1622         int gpio_shift = gpio_num +
1623                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1624         u32 gpio_mask = (1 << gpio_shift);
1625         u32 gpio_reg;
1626
1627         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1628                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1629                 return -EINVAL;
1630         }
1631
1632         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1633         /* read GPIO int */
1634         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1635
1636         switch (mode) {
1637         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1638                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1639                                    "output low\n", gpio_num, gpio_shift);
1640                 /* clear SET and set CLR */
1641                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1642                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1643                 break;
1644
1645         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1646                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1647                                    "output high\n", gpio_num, gpio_shift);
1648                 /* clear CLR and set SET */
1649                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1650                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1651                 break;
1652
1653         default:
1654                 break;
1655         }
1656
1657         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1658         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1659
1660         return 0;
1661 }
1662
1663 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1664 {
1665         u32 spio_mask = (1 << spio_num);
1666         u32 spio_reg;
1667
1668         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1669             (spio_num > MISC_REGISTERS_SPIO_7)) {
1670                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1671                 return -EINVAL;
1672         }
1673
1674         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1675         /* read SPIO and mask except the float bits */
1676         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1677
1678         switch (mode) {
1679         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1680                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1681                 /* clear FLOAT and set CLR */
1682                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1683                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1684                 break;
1685
1686         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1687                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1688                 /* clear FLOAT and set SET */
1689                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1690                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1691                 break;
1692
1693         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1694                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1695                 /* set FLOAT */
1696                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1697                 break;
1698
1699         default:
1700                 break;
1701         }
1702
1703         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1704         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1705
1706         return 0;
1707 }
1708
1709 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1710 {
1711         u32 sel_phy_idx = 0;
1712         if (bp->link_vars.link_up) {
1713                 sel_phy_idx = EXT_PHY1;
1714                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1715                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1716                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1717                         sel_phy_idx = EXT_PHY2;
1718         } else {
1719
1720                 switch (bnx2x_phy_selection(&bp->link_params)) {
1721                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1722                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1723                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1724                        sel_phy_idx = EXT_PHY1;
1725                        break;
1726                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1727                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1728                        sel_phy_idx = EXT_PHY2;
1729                        break;
1730                 }
1731         }
1732         /*
1733         * The selected active PHY is always reported after swapping (in case
1734         * PHY swapping is enabled), so when swapping is enabled we need to
1735         * reverse the configuration.
1736         */
1737
1738         if (bp->link_params.multi_phy_config &
1739             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1740                 if (sel_phy_idx == EXT_PHY1)
1741                         sel_phy_idx = EXT_PHY2;
1742                 else if (sel_phy_idx == EXT_PHY2)
1743                         sel_phy_idx = EXT_PHY1;
1744         }
1745         return LINK_CONFIG_IDX(sel_phy_idx);
1746 }
1747
1748 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1749 {
1750         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1751         switch (bp->link_vars.ieee_fc &
1752                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1753         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1754                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1755                                                    ADVERTISED_Pause);
1756                 break;
1757
1758         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1759                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1760                                                   ADVERTISED_Pause);
1761                 break;
1762
1763         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1764                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1765                 break;
1766
1767         default:
1768                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1769                                                    ADVERTISED_Pause);
1770                 break;
1771         }
1772 }
1773
1774 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1775 {
1776         if (!BP_NOMCP(bp)) {
1777                 u8 rc;
1778                 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1779                 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1780                 /* Initialize link parameters structure variables */
1781                 /* It is recommended to turn off RX FC for jumbo frames
1782                    for better performance */
1783                 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1784                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1785                 else
1786                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1787
1788                 bnx2x_acquire_phy_lock(bp);
1789
1790                 if (load_mode == LOAD_DIAG) {
1791                         bp->link_params.loopback_mode = LOOPBACK_XGXS;
1792                         bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1793                 }
1794
1795                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1796
1797                 bnx2x_release_phy_lock(bp);
1798
1799                 bnx2x_calc_fc_adv(bp);
1800
1801                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1802                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1803                         bnx2x_link_report(bp);
1804                 }
1805                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1806                 return rc;
1807         }
1808         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1809         return -EINVAL;
1810 }
1811
1812 void bnx2x_link_set(struct bnx2x *bp)
1813 {
1814         if (!BP_NOMCP(bp)) {
1815                 bnx2x_acquire_phy_lock(bp);
1816                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1817                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1818                 bnx2x_release_phy_lock(bp);
1819
1820                 bnx2x_calc_fc_adv(bp);
1821         } else
1822                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1823 }
1824
1825 static void bnx2x__link_reset(struct bnx2x *bp)
1826 {
1827         if (!BP_NOMCP(bp)) {
1828                 bnx2x_acquire_phy_lock(bp);
1829                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1830                 bnx2x_release_phy_lock(bp);
1831         } else
1832                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1833 }
1834
1835 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1836 {
1837         u8 rc = 0;
1838
1839         if (!BP_NOMCP(bp)) {
1840                 bnx2x_acquire_phy_lock(bp);
1841                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1842                                      is_serdes);
1843                 bnx2x_release_phy_lock(bp);
1844         } else
1845                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1846
1847         return rc;
1848 }
1849
1850 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1851 {
1852         u32 r_param = bp->link_vars.line_speed / 8;
1853         u32 fair_periodic_timeout_usec;
1854         u32 t_fair;
1855
1856         memset(&(bp->cmng.rs_vars), 0,
1857                sizeof(struct rate_shaping_vars_per_port));
1858         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1859
1860         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1861         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1862
1863         /* this is the threshold below which no timer arming will occur;
1864            the 1.25 coefficient makes the threshold a little bigger than
1865            the real time, to compensate for timer inaccuracy */
1866         bp->cmng.rs_vars.rs_threshold =
1867                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1868
1869         /* resolution of fairness timer */
1870         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1871         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
1872         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1873
1874         /* this is the threshold below which we won't arm the timer anymore */
1875         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1876
1877         /* we multiply by 1e3/8 to get bytes/msec.
1878            We don't want the credits to exceed a credit of
1879            t_fair*FAIR_MEM (the algorithm resolution) */
1880         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1881         /* since each tick is 4 usec */
1882         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1883 }
1884
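/*
 * Worked numbers for the timing math above (a sketch, derived only from
 * the expressions and comments in this function): at 10000 Mbps,
 * r_param = 10000 / 8 = 1250 bytes per usec, so
 * fair_periodic_timeout_usec = QM_ARB_BYTES / 1250 and
 * t_fair = T_FAIR_COEF / 10000 - consistent with the "for 10G it is
 * 1000 usec" note above if T_FAIR_COEF is 10^7. At 1000 Mbps the same
 * formula gives a t_fair ten times longer, matching the 1G note.
 */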
1885 /* Calculates the sum of vn_min_rates.
1886    It's needed for further normalizing of the min_rates.
1887    Returns:
1888      sum of vn_min_rates.
1889        or
1890      0 - if all the min_rates are 0.
1891      In the latter case the fairness algorithm should be deactivated.
1892      If not all min_rates are zero then those that are zero will be set to 1.
1893  */
1894 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1895 {
1896         int all_zero = 1;
1897         int vn;
1898
1899         bp->vn_weight_sum = 0;
1900         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1901                 u32 vn_cfg = bp->mf_config[vn];
1902                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1903                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1904
1905                 /* Skip hidden vns */
1906                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1907                         continue;
1908
1909                 /* If min rate is zero - set it to 1 */
1910                 if (!vn_min_rate)
1911                         vn_min_rate = DEF_MIN_RATE;
1912                 else
1913                         all_zero = 0;
1914
1915                 bp->vn_weight_sum += vn_min_rate;
1916         }
1917
1918         /* ... only if all min rates are zeros - disable fairness */
1919         if (all_zero) {
1920                 bp->cmng.flags.cmng_enables &=
1921                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1922                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1923                    "  fairness will be disabled\n");
1924         } else
1925                 bp->cmng.flags.cmng_enables |=
1926                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1927 }
1928
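/*
 * Worked example for the sum above (illustrative values only): if the
 * four vns report min rates of {0, 2500, 0, 5000} after the *100 scaling,
 * the two zero entries are raised to DEF_MIN_RATE before being summed,
 * all_zero ends up 0 and fairness stays enabled; only when every visible
 * vn reports 0 is CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared.
 */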
1929 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1930 {
1931         struct rate_shaping_vars_per_vn m_rs_vn;
1932         struct fairness_vars_per_vn m_fair_vn;
1933         u32 vn_cfg = bp->mf_config[vn];
1934         int func = 2*vn + BP_PORT(bp);
1935         u16 vn_min_rate, vn_max_rate;
1936         int i;
1937
1938         /* If function is hidden - set min and max to zeroes */
1939         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1940                 vn_min_rate = 0;
1941                 vn_max_rate = 0;
1942
1943         } else {
1944                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1945                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1946                 /* If min rate is zero - set it to 1 */
1947                 if (bp->vn_weight_sum && (vn_min_rate == 0))
1948                         vn_min_rate = DEF_MIN_RATE;
1949                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1950                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1951         }
1952
1953         DP(NETIF_MSG_IFUP,
1954            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1955            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1956
1957         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1958         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1959
1960         /* global vn counter - maximal Mbps for this vn */
1961         m_rs_vn.vn_counter.rate = vn_max_rate;
1962
1963         /* quota - number of bytes transmitted in this period */
1964         m_rs_vn.vn_counter.quota =
1965                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1966
1967         if (bp->vn_weight_sum) {
1968                 /* credit for each period of the fairness algorithm:
1969                    number of bytes in T_FAIR (the vn share the port rate).
1970                    vn_weight_sum should not be larger than 10000, thus
1971                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1972                    than zero */
1973                 m_fair_vn.vn_credit_delta =
1974                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1975                                                    (8 * bp->vn_weight_sum))),
1976                               (bp->cmng.fair_vars.fair_threshold * 2));
1977                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1978                    m_fair_vn.vn_credit_delta);
1979         }
1980
1981         /* Store it to internal memory */
1982         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1983                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1984                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1985                        ((u32 *)(&m_rs_vn))[i]);
1986
1987         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1988                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1989                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1990                        ((u32 *)(&m_fair_vn))[i]);
1991 }
1992
1993 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1994 {
1995         if (CHIP_REV_IS_SLOW(bp))
1996                 return CMNG_FNS_NONE;
1997         if (IS_MF(bp))
1998                 return CMNG_FNS_MINMAX;
1999
2000         return CMNG_FNS_NONE;
2001 }
2002
2003 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2004 {
2005         int vn;
2006
2007         if (BP_NOMCP(bp))
2008                 return; /* what should be the default value in this case? */
2009
2010         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2011                 int /*abs*/func = 2*vn + BP_PORT(bp);
2012                 bp->mf_config[vn] =
2013                         MF_CFG_RD(bp, func_mf_config[func].config);
2014         }
2015 }
2016
2017 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2018 {
2019
2020         if (cmng_type == CMNG_FNS_MINMAX) {
2021                 int vn;
2022
2023                 /* clear cmng_enables */
2024                 bp->cmng.flags.cmng_enables = 0;
2025
2026                 /* read mf conf from shmem */
2027                 if (read_cfg)
2028                         bnx2x_read_mf_cfg(bp);
2029
2030                 /* Init rate shaping and fairness contexts */
2031                 bnx2x_init_port_minmax(bp);
2032
2033                 /* calculate vn_weight_sum and enable fairness if not 0 */
2034                 bnx2x_calc_vn_weight_sum(bp);
2035
2036                 /* calculate and set min-max rate for each vn */
2037                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2038                         bnx2x_init_vn_minmax(bp, vn);
2039
2040                 /* always enable rate shaping and fairness */
2041                 bp->cmng.flags.cmng_enables |=
2042                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2043                 if (!bp->vn_weight_sum)
2044                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2045                                    "  fairness will be disabled\n");
2046                 return;
2047         }
2048
2049         /* rate shaping and fairness are disabled */
2050         DP(NETIF_MSG_IFUP,
2051            "rate shaping and fairness are disabled\n");
2052 }
2053
2054 static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2055 {
2056         int port = BP_PORT(bp);
2057         int func;
2058         int vn;
2059
2060         /* Set the attention towards other drivers on the same port */
2061         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2062                 if (vn == BP_E1HVN(bp))
2063                         continue;
2064
2065                 func = ((vn << 1) | port);
2066                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2067                        (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2068         }
2069 }
2070
2071 /* This function is called upon link interrupt */
2072 static void bnx2x_link_attn(struct bnx2x *bp)
2073 {
2074         u32 prev_link_status = bp->link_vars.link_status;
2075         /* Make sure that we are synced with the current statistics */
2076         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2077
2078         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2079
2080         if (bp->link_vars.link_up) {
2081
2082                 /* dropless flow control */
2083                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2084                         int port = BP_PORT(bp);
2085                         u32 pause_enabled = 0;
2086
2087                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2088                                 pause_enabled = 1;
2089
2090                         REG_WR(bp, BAR_USTRORM_INTMEM +
2091                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2092                                pause_enabled);
2093                 }
2094
2095                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2096                         struct host_port_stats *pstats;
2097
2098                         pstats = bnx2x_sp(bp, port_stats);
2099                         /* reset old bmac stats */
2100                         memset(&(pstats->mac_stx[0]), 0,
2101                                sizeof(struct mac_stx));
2102                 }
2103                 if (bp->state == BNX2X_STATE_OPEN)
2104                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2105         }
2106
2107         /* indicate link status only if link status actually changed */
2108         if (prev_link_status != bp->link_vars.link_status)
2109                 bnx2x_link_report(bp);
2110
2111         if (IS_MF(bp))
2112                 bnx2x_link_sync_notify(bp);
2113
2114         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2115                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2116
2117                 if (cmng_fns != CMNG_FNS_NONE) {
2118                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2119                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2120                 } else
2121                         /* rate shaping and fairness are disabled */
2122                         DP(NETIF_MSG_IFUP,
2123                            "single function mode without fairness\n");
2124         }
2125 }
2126
2127 void bnx2x__link_status_update(struct bnx2x *bp)
2128 {
2129         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2130                 return;
2131
2132         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2133
2134         if (bp->link_vars.link_up)
2135                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2136         else
2137                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2138
2139         /* the link status update could be the result of a DCC event,
2140            hence re-read the shmem mf configuration */
2141         bnx2x_read_mf_cfg(bp);
2142
2143         /* indicate link status */
2144         bnx2x_link_report(bp);
2145 }
2146
2147 static void bnx2x_pmf_update(struct bnx2x *bp)
2148 {
2149         int port = BP_PORT(bp);
2150         u32 val;
2151
2152         bp->port.pmf = 1;
2153         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2154
2155         /* enable nig attention */
2156         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2157         if (bp->common.int_block == INT_BLOCK_HC) {
2158                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2159                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2160         } else if (CHIP_IS_E2(bp)) {
2161                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2162                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2163         }
2164
2165         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2166 }
2167
2168 /* end of Link */
2169
2170 /* slow path */
2171
2172 /*
2173  * General service functions
2174  */
2175
2176 /* send the MCP a request, block until there is a reply */
2177 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2178 {
2179         int mb_idx = BP_FW_MB_IDX(bp);
2180         u32 seq = ++bp->fw_seq;
2181         u32 rc = 0;
2182         u32 cnt = 1;
2183         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2184
2185         mutex_lock(&bp->fw_mb_mutex);
2186         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2187         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2188
2189         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2190
2191         do {
2192                 /* let the FW do its magic ... */
2193                 msleep(delay);
2194
2195                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2196
2197                 /* Give the FW up to 5 seconds (500*10ms) */
2198         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2199
2200         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2201            cnt*delay, rc, seq);
2202
2203         /* is this a reply to our command? */
2204         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2205                 rc &= FW_MSG_CODE_MASK;
2206         else {
2207                 /* FW BUG! */
2208                 BNX2X_ERR("FW failed to respond!\n");
2209                 bnx2x_fw_dump(bp);
2210                 rc = 0;
2211         }
2212         mutex_unlock(&bp->fw_mb_mutex);
2213
2214         return rc;
2215 }
2216
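/*
 * Illustrative usage sketch (not part of the driver): issuing an MCP
 * command and treating a zero return as "no reply". The command code is
 * one already used in this file; the error handling is a placeholder.
 */
static void __maybe_unused bnx2x_fw_command_usage_sketch(struct bnx2x *bp)
{
        u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);

        if (!rc)
                BNX2X_ERR("MCP did not respond to the command\n");
        /* else rc holds the FW_MSG_CODE_MASK-ed response code */
}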
2217 /* must be called under rtnl_lock */
2218 void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2219 {
2220         u32 mask = (1 << cl_id);
2221
2222         /* initial setting is BNX2X_ACCEPT_NONE */
2223         u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2224         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2225         u8 unmatched_unicast = 0;
2226
2227         if (filters & BNX2X_PROMISCUOUS_MODE) {
2228                 /* promiscuous - accept all, drop none */
2229                 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2230                 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2231         }
2232         if (filters & BNX2X_ACCEPT_UNICAST) {
2233                 /* accept matched ucast */
2234                 drop_all_ucast = 0;
2235         }
2236         if (filters & BNX2X_ACCEPT_MULTICAST) {
2237                 /* accept matched mcast */
2238                 drop_all_mcast = 0;
2239         }
2240         if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2241                 /* accept all ucast */
2242                 drop_all_ucast = 0;
2243                 accp_all_ucast = 1;
2244         }
2245         if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2246                 /* accept all mcast */
2247                 drop_all_mcast = 0;
2248                 accp_all_mcast = 1;
2249         }
2250         if (filters & BNX2X_ACCEPT_BROADCAST) {
2251                 /* accept (all) bcast */
2252                 drop_all_bcast = 0;
2253                 accp_all_bcast = 1;
2254         }
2255
2256         bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2257                 bp->mac_filters.ucast_drop_all | mask :
2258                 bp->mac_filters.ucast_drop_all & ~mask;
2259
2260         bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2261                 bp->mac_filters.mcast_drop_all | mask :
2262                 bp->mac_filters.mcast_drop_all & ~mask;
2263
2264         bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2265                 bp->mac_filters.bcast_drop_all | mask :
2266                 bp->mac_filters.bcast_drop_all & ~mask;
2267
2268         bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2269                 bp->mac_filters.ucast_accept_all | mask :
2270                 bp->mac_filters.ucast_accept_all & ~mask;
2271
2272         bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2273                 bp->mac_filters.mcast_accept_all | mask :
2274                 bp->mac_filters.mcast_accept_all & ~mask;
2275
2276         bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2277                 bp->mac_filters.bcast_accept_all | mask :
2278                 bp->mac_filters.bcast_accept_all & ~mask;
2279
2280         bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2281                 bp->mac_filters.unmatched_unicast | mask :
2282                 bp->mac_filters.unmatched_unicast & ~mask;
2283 }
2284
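/*
 * Worked example of the per-client mask updates above (a sketch assuming
 * cl_id == 3, so mask == 0x8): passing BNX2X_ACCEPT_UNICAST |
 * BNX2X_ACCEPT_BROADCAST clears bit 3 in ucast_drop_all and
 * bcast_drop_all, sets it in bcast_accept_all and leaves it set in
 * mcast_drop_all, while the bits of every other client are untouched.
 */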
2285 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2286 {
2287         if (FUNC_CONFIG(p->func_flgs)) {
2288                 struct tstorm_eth_function_common_config tcfg = {0};
2289
2290                 /* tpa */
2291                 if (p->func_flgs & FUNC_FLG_TPA)
2292                         tcfg.config_flags |=
2293                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2294
2295                 /* set rss flags */
2296                 if (p->func_flgs & FUNC_FLG_RSS) {
2297                         u16 rss_flgs = (p->rss->mode <<
2298                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2299
2300                         if (p->rss->cap & RSS_IPV4_CAP)
2301                                 rss_flgs |= RSS_IPV4_CAP_MASK;
2302                         if (p->rss->cap & RSS_IPV4_TCP_CAP)
2303                                 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2304                         if (p->rss->cap & RSS_IPV6_CAP)
2305                                 rss_flgs |= RSS_IPV6_CAP_MASK;
2306                         if (p->rss->cap & RSS_IPV6_TCP_CAP)
2307                                 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2308
2309                         tcfg.config_flags |= rss_flgs;
2310                         tcfg.rss_result_mask = p->rss->result_mask;
2311
2312                 }
2313
2314                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2315         }
2316
2317         /* Enable the function in the FW */
2318         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2319         storm_memset_func_en(bp, p->func_id, 1);
2320
2321         /* statistics */
2322         if (p->func_flgs & FUNC_FLG_STATS) {
2323                 struct stats_indication_flags stats_flags = {0};
2324                 stats_flags.collect_eth = 1;
2325
2326                 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2327                 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2328
2329                 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2330                 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2331
2332                 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2333                 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2334
2335                 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2336                 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2337         }
2338
2339         /* spq */
2340         if (p->func_flgs & FUNC_FLG_SPQ) {
2341                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2342                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2343                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2344         }
2345 }
2346
2347 static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2348                                      struct bnx2x_fastpath *fp)
2349 {
2350         u16 flags = 0;
2351
2352         /* calculate queue flags */
2353         flags |= QUEUE_FLG_CACHE_ALIGN;
2354         flags |= QUEUE_FLG_HC;
2355         flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2356
2357 #ifdef BCM_VLAN
2358         flags |= QUEUE_FLG_VLAN;
2359         DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2360 #endif
2361
2362         if (!fp->disable_tpa)
2363                 flags |= QUEUE_FLG_TPA;
2364
2365         flags |= QUEUE_FLG_STATS;
2366
2367         return flags;
2368 }
2369
2370 static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2371         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2372         struct bnx2x_rxq_init_params *rxq_init)
2373 {
2374         u16 max_sge = 0;
2375         u16 sge_sz = 0;
2376         u16 tpa_agg_size = 0;
2377
2378         /* calculate queue flags */
2379         u16 flags = bnx2x_get_cl_flags(bp, fp);
2380
2381         if (!fp->disable_tpa) {
2382                 pause->sge_th_hi = 250;
2383                 pause->sge_th_lo = 150;
2384                 tpa_agg_size = min_t(u32,
2385                         (min_t(u32, 8, MAX_SKB_FRAGS) *
2386                         SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2387                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2388                         SGE_PAGE_SHIFT;
2389                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2390                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2391                 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2392                                     0xffff);
2393         }
2394
2395         /* pause - not for e1 */
2396         if (!CHIP_IS_E1(bp)) {
2397                 pause->bd_th_hi = 350;
2398                 pause->bd_th_lo = 250;
2399                 pause->rcq_th_hi = 350;
2400                 pause->rcq_th_lo = 250;
2401                 pause->sge_th_hi = 0;
2402                 pause->sge_th_lo = 0;
2403                 pause->pri_map = 1;
2404         }
2405
2406         /* rxq setup */
2407         rxq_init->flags = flags;
2408         rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2409         rxq_init->dscr_map = fp->rx_desc_mapping;
2410         rxq_init->sge_map = fp->rx_sge_mapping;
2411         rxq_init->rcq_map = fp->rx_comp_mapping;
2412         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2413         rxq_init->mtu = bp->dev->mtu;
2414         rxq_init->buf_sz = bp->rx_buf_size;
2415         rxq_init->cl_qzone_id = fp->cl_qzone_id;
2416         rxq_init->cl_id = fp->cl_id;
2417         rxq_init->spcl_id = fp->cl_id;
2418         rxq_init->stat_id = fp->cl_id;
2419         rxq_init->tpa_agg_sz = tpa_agg_size;
2420         rxq_init->sge_buf_sz = sge_sz;
2421         rxq_init->max_sges_pkt = max_sge;
2422         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2423         rxq_init->fw_sb_id = fp->fw_sb_id;
2424
2425         rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2426
2427         rxq_init->cid = HW_CID(bp, fp->cid);
2428
2429         rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2430 }
2431
2432 static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2433         struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2434 {
2435         u16 flags = bnx2x_get_cl_flags(bp, fp);
2436
2437         txq_init->flags = flags;
2438         txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2439         txq_init->dscr_map = fp->tx_desc_mapping;
2440         txq_init->stat_id = fp->cl_id;
2441         txq_init->cid = HW_CID(bp, fp->cid);
2442         txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2443         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2444         txq_init->fw_sb_id = fp->fw_sb_id;
2445         txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2446 }
2447
2448 void bnx2x_pf_init(struct bnx2x *bp)
2449 {
2450         struct bnx2x_func_init_params func_init = {0};
2451         struct bnx2x_rss_params rss = {0};
2452         struct event_ring_data eq_data = { {0} };
2453         u16 flags;
2454
2455         /* pf specific setups */
2456         if (!CHIP_IS_E1(bp))
2457                 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2458
2459         if (CHIP_IS_E2(bp)) {
2460                 /* reset IGU PF statistics: MSIX + ATTN */
2461                 /* PF */
2462                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2463                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2464                            (CHIP_MODE_IS_4_PORT(bp) ?
2465                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2466                 /* ATTN */
2467                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2468                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2469                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2470                            (CHIP_MODE_IS_4_PORT(bp) ?
2471                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2472         }
2473
2474         /* function setup flags */
2475         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2476
2477         if (CHIP_IS_E1x(bp))
2478                 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2479         else
2480                 flags |= FUNC_FLG_TPA;
2481
2482         /**
2483          * Although RSS is meaningless when there is a single HW queue, we
2484          * still need it enabled in order to have the HW Rx hash generated.
2485          *
2486          * if (is_eth_multi(bp))
2487          *      flags |= FUNC_FLG_RSS;
2488          */
2489
2490         /* function setup */
2491         if (flags & FUNC_FLG_RSS) {
2492                 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2493                            RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2494                 rss.mode = bp->multi_mode;
2495                 rss.result_mask = MULTI_MASK;
2496                 func_init.rss = &rss;
2497         }
2498
2499         func_init.func_flgs = flags;
2500         func_init.pf_id = BP_FUNC(bp);
2501         func_init.func_id = BP_FUNC(bp);
2502         func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2503         func_init.spq_map = bp->spq_mapping;
2504         func_init.spq_prod = bp->spq_prod_idx;
2505
2506         bnx2x_func_init(bp, &func_init);
2507
2508         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2509
2510         /*
2511         Congestion management values depend on the link rate.
2512         There is no active link yet, so the initial link rate is set to
2513         10 Gbps. When the link comes up, the congestion management values
2514         are re-calculated according to the actual link rate.
2515         */
2516         bp->link_vars.line_speed = SPEED_10000;
2517         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2518
2519         /* Only the PMF sets the HW */
2520         if (bp->port.pmf)
2521                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2522
2523         /* no rx until link is up */
2524         bp->rx_mode = BNX2X_RX_MODE_NONE;
2525         bnx2x_set_storm_rx_mode(bp);
2526
2527         /* init Event Queue */
2528         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2529         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2530         eq_data.producer = bp->eq_prod;
2531         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2532         eq_data.sb_id = DEF_SB_ID;
2533         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2534 }
2535
2536
2537 static void bnx2x_e1h_disable(struct bnx2x *bp)
2538 {
2539         int port = BP_PORT(bp);
2540
2541         netif_tx_disable(bp->dev);
2542
2543         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2544
2545         netif_carrier_off(bp->dev);
2546 }
2547
2548 static void bnx2x_e1h_enable(struct bnx2x *bp)
2549 {
2550         int port = BP_PORT(bp);
2551
2552         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2553
2554         /* Tx queues should only be re-enabled */
2555         netif_tx_wake_all_queues(bp->dev);
2556
2557         /*
2558          * Should not call netif_carrier_on since it will be called if the link
2559          * is up when checking for link state
2560          */
2561 }
2562
2563 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2564 {
2565         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2566
2567         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2568
2569                 /*
2570                  * This is the only place besides the function initialization
2571                  * where the bp->flags can change so it is done without any
2572                  * locks
2573                  */
2574                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2575                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2576                         bp->flags |= MF_FUNC_DIS;
2577
2578                         bnx2x_e1h_disable(bp);
2579                 } else {
2580                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2581                         bp->flags &= ~MF_FUNC_DIS;
2582
2583                         bnx2x_e1h_enable(bp);
2584                 }
2585                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2586         }
2587         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2588
2589                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2590                 bnx2x_link_sync_notify(bp);
2591                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2592                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2593         }
2594
2595         /* Report results to MCP */
2596         if (dcc_event)
2597                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2598         else
2599                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2600 }
2601
2602 /* must be called under the spq lock */
2603 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2604 {
2605         struct eth_spe *next_spe = bp->spq_prod_bd;
2606
2607         if (bp->spq_prod_bd == bp->spq_last_bd) {
2608                 bp->spq_prod_bd = bp->spq;
2609                 bp->spq_prod_idx = 0;
2610                 DP(NETIF_MSG_TIMER, "end of spq\n");
2611         } else {
2612                 bp->spq_prod_bd++;
2613                 bp->spq_prod_idx++;
2614         }
2615         return next_spe;
2616 }
2617
2618 /* must be called under the spq lock */
2619 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2620 {
2621         int func = BP_FUNC(bp);
2622
2623         /* Make sure that BD data is updated before writing the producer */
2624         wmb();
2625
2626         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2627                  bp->spq_prod_idx);
2628         mmiowb();
2629 }
2630
2631 /* the slow path queue is odd since completions arrive on the fastpath ring */
2632 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2633                   u32 data_hi, u32 data_lo, int common)
2634 {
2635         struct eth_spe *spe;
2636         u16 type;
2637
2638 #ifdef BNX2X_STOP_ON_ERROR
2639         if (unlikely(bp->panic))
2640                 return -EIO;
2641 #endif
2642
2643         spin_lock_bh(&bp->spq_lock);
2644
2645         if (!atomic_read(&bp->spq_left)) {
2646                 BNX2X_ERR("BUG! SPQ ring full!\n");
2647                 spin_unlock_bh(&bp->spq_lock);
2648                 bnx2x_panic();
2649                 return -EBUSY;
2650         }
2651
2652         spe = bnx2x_sp_get_next(bp);
2653
2654         /* CID needs the port number to be encoded in it */
2655         spe->hdr.conn_and_cmd_data =
2656                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2657                                     HW_CID(bp, cid));
2658
2659         if (common)
2660                 /* Common ramrods:
2661                  *      FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2662                  *      TRAFFIC_STOP, TRAFFIC_START
2663                  */
2664                 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2665                         & SPE_HDR_CONN_TYPE;
2666         else
2667                 /* ETH ramrods: SETUP, HALT */
2668                 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2669                         & SPE_HDR_CONN_TYPE;
2670
2671         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2672                  SPE_HDR_FUNCTION_ID);
2673
2674         spe->hdr.type = cpu_to_le16(type);
2675
2676         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2677         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2678
2679         /* stats ramrod has its own slot on the spq */
2680         if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2681                 /* It's ok if the actual decrement is issued towards the memory
2682                  * somewhere between the spin_lock and spin_unlock. Thus no
2683                  * more explicit memory barrier is needed.
2684                  */
2685                 atomic_dec(&bp->spq_left);
2686
2687         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
2689            "type(0x%x) left %x\n",
2690            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2691            (u32)(U64_LO(bp->spq_mapping) +
2692            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2693            HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
2694
2695         bnx2x_sp_prod_update(bp);
2696         spin_unlock_bh(&bp->spq_lock);
2697         return 0;
2698 }
2699
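/*
 * Illustrative usage sketch (not part of the driver): posting a common
 * ramrod through the slow path queue. The stats query command is the one
 * this function already special-cases; data_hi/data_lo normally carry a
 * DMA address split with U64_HI()/U64_LO().
 */
static int __maybe_unused bnx2x_sp_post_usage_sketch(struct bnx2x *bp,
                                                     dma_addr_t mapping)
{
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
                             U64_HI(mapping), U64_LO(mapping), 1);
}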
2700 /* acquire split MCP access lock register */
2701 static int bnx2x_acquire_alr(struct bnx2x *bp)
2702 {
2703         u32 j, val;
2704         int rc = 0;
2705
2706         might_sleep();
2707         for (j = 0; j < 1000; j++) {
2708                 val = (1UL << 31);
2709                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2710                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2711                 if (val & (1L << 31))
2712                         break;
2713
2714                 msleep(5);
2715         }
2716         if (!(val & (1L << 31))) {
2717                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2718                 rc = -EBUSY;
2719         }
2720
2721         return rc;
2722 }
2723
2724 /* release split MCP access lock register */
2725 static void bnx2x_release_alr(struct bnx2x *bp)
2726 {
2727         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2728 }
2729
2730 #define BNX2X_DEF_SB_ATT_IDX    0x0001
2731 #define BNX2X_DEF_SB_IDX        0x0002
2732
2733 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2734 {
2735         struct host_sp_status_block *def_sb = bp->def_status_blk;
2736         u16 rc = 0;
2737
2738         barrier(); /* status block is written to by the chip */
2739         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2740                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2741                 rc |= BNX2X_DEF_SB_ATT_IDX;
2742         }
2743
2744         if (bp->def_idx != def_sb->sp_sb.running_index) {
2745                 bp->def_idx = def_sb->sp_sb.running_index;
2746                 rc |= BNX2X_DEF_SB_IDX;
2747         }
2748
2749         /* Do not reorder: indices reading should complete before handling */
2750         barrier();
2751         return rc;
2752 }
2753
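/*
 * Illustrative usage sketch (not part of the driver): how the returned
 * bits are meant to be consumed. The real dispatch lives in the slow path
 * task; the DP() calls here are placeholders.
 */
static void __maybe_unused bnx2x_update_dsb_idx_usage_sketch(struct bnx2x *bp)
{
        u16 status = bnx2x_update_dsb_idx(bp);

        if (status & BNX2X_DEF_SB_ATT_IDX)
                DP(NETIF_MSG_HW, "attention bits index advanced\n");
        if (status & BNX2X_DEF_SB_IDX)
                DP(NETIF_MSG_HW, "slow path index advanced\n");
}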
2754 /*
2755  * slow path service functions
2756  */
2757
2758 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2759 {
2760         int port = BP_PORT(bp);
2761         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2762                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2763         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2764                                        NIG_REG_MASK_INTERRUPT_PORT0;
2765         u32 aeu_mask;
2766         u32 nig_mask = 0;
2767         u32 reg_addr;
2768
2769         if (bp->attn_state & asserted)
2770                 BNX2X_ERR("IGU ERROR\n");
2771
2772         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2773         aeu_mask = REG_RD(bp, aeu_addr);
2774
2775         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2776            aeu_mask, asserted);
2777         aeu_mask &= ~(asserted & 0x3ff);
2778         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2779
2780         REG_WR(bp, aeu_addr, aeu_mask);
2781         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782
2783         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2784         bp->attn_state |= asserted;
2785         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2786
2787         if (asserted & ATTN_HARD_WIRED_MASK) {
2788                 if (asserted & ATTN_NIG_FOR_FUNC) {
2789
2790                         bnx2x_acquire_phy_lock(bp);
2791
2792                         /* save nig interrupt mask */
2793                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2794                         REG_WR(bp, nig_int_mask_addr, 0);
2795
2796                         bnx2x_link_attn(bp);
2797
2798                         /* handle unicore attn? */
2799                 }
2800                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2801                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2802
2803                 if (asserted & GPIO_2_FUNC)
2804                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2805
2806                 if (asserted & GPIO_3_FUNC)
2807                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2808
2809                 if (asserted & GPIO_4_FUNC)
2810                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2811
2812                 if (port == 0) {
2813                         if (asserted & ATTN_GENERAL_ATTN_1) {
2814                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2815                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2816                         }
2817                         if (asserted & ATTN_GENERAL_ATTN_2) {
2818                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2819                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2820                         }
2821                         if (asserted & ATTN_GENERAL_ATTN_3) {
2822                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2823                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2824                         }
2825                 } else {
2826                         if (asserted & ATTN_GENERAL_ATTN_4) {
2827                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2828                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2829                         }
2830                         if (asserted & ATTN_GENERAL_ATTN_5) {
2831                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2832                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2833                         }
2834                         if (asserted & ATTN_GENERAL_ATTN_6) {
2835                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2836                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2837                         }
2838                 }
2839
2840         } /* if hardwired */
2841
2842         if (bp->common.int_block == INT_BLOCK_HC)
2843                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2844                             COMMAND_REG_ATTN_BITS_SET);
2845         else
2846                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2847
2848         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2849            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2850         REG_WR(bp, reg_addr, asserted);
2851
2852         /* now set back the mask */
2853         if (asserted & ATTN_NIG_FOR_FUNC) {
2854                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2855                 bnx2x_release_phy_lock(bp);
2856         }
2857 }
2858
2859 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2860 {
2861         int port = BP_PORT(bp);
2862         u32 ext_phy_config;
2863         /* mark the failure */
2864         ext_phy_config =
2865                 SHMEM_RD(bp,
2866                          dev_info.port_hw_config[port].external_phy_config);
2867
2868         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2869         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2870         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2871                  ext_phy_config);
2872
2873         /* log the failure */
2874         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2875                " the driver to shut down the card to prevent permanent"
2876                " damage.  Please contact OEM Support for assistance\n");
2877 }
2878
2879 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2880 {
2881         int port = BP_PORT(bp);
2882         int reg_offset;
2883         u32 val;
2884
2885         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2886                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2887
2888         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2889
2890                 val = REG_RD(bp, reg_offset);
2891                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2892                 REG_WR(bp, reg_offset, val);
2893
2894                 BNX2X_ERR("SPIO5 hw attention\n");
2895
2896                 /* Fan failure attention */
2897                 bnx2x_hw_reset_phy(&bp->link_params);
2898                 bnx2x_fan_failure(bp);
2899         }
2900
2901         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2902                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2903                 bnx2x_acquire_phy_lock(bp);
2904                 bnx2x_handle_module_detect_int(&bp->link_params);
2905                 bnx2x_release_phy_lock(bp);
2906         }
2907
2908         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2909
2910                 val = REG_RD(bp, reg_offset);
2911                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2912                 REG_WR(bp, reg_offset, val);
2913
2914                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2915                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2916                 bnx2x_panic();
2917         }
2918 }
2919
2920 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2921 {
2922         u32 val;
2923
2924         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2925
2926                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2927                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2928                 /* DORQ discard attention */
2929                 if (val & 0x2)
2930                         BNX2X_ERR("FATAL error from DORQ\n");
2931         }
2932
2933         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2934
2935                 int port = BP_PORT(bp);
2936                 int reg_offset;
2937
2938                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2939                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2940
2941                 val = REG_RD(bp, reg_offset);
2942                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2943                 REG_WR(bp, reg_offset, val);
2944
2945                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2946                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2947                 bnx2x_panic();
2948         }
2949 }
2950
2951 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2952 {
2953         u32 val;
2954
2955         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2956
2957                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2958                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2959                 /* CFC error attention */
2960                 if (val & 0x2)
2961                         BNX2X_ERR("FATAL error from CFC\n");
2962         }
2963
2964         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2965
2966                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2967                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2968                 /* RQ_USDMDP_FIFO_OVERFLOW */
2969                 if (val & 0x18000)
2970                         BNX2X_ERR("FATAL error from PXP\n");
2971                 if (CHIP_IS_E2(bp)) {
2972                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2973                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2974                 }
2975         }
2976
2977         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2978
2979                 int port = BP_PORT(bp);
2980                 int reg_offset;
2981
2982                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2983                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2984
2985                 val = REG_RD(bp, reg_offset);
2986                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2987                 REG_WR(bp, reg_offset, val);
2988
2989                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2990                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2991                 bnx2x_panic();
2992         }
2993 }
2994
2995 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2996 {
2997         u32 val;
2998
2999         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3000
3001                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3002                         int func = BP_FUNC(bp);
3003
3004                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3005                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3006                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3007                         val = SHMEM_RD(bp,
3008                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3009                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3010                                 bnx2x_dcc_event(bp,
3011                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3012                         bnx2x__link_status_update(bp);
3013                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3014                                 bnx2x_pmf_update(bp);
3015
3016                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3017
3018                         BNX2X_ERR("MC assert!\n");
3019                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3020                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3021                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3022                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3023                         bnx2x_panic();
3024
3025                 } else if (attn & BNX2X_MCP_ASSERT) {
3026
3027                         BNX2X_ERR("MCP assert!\n");
3028                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3029                         bnx2x_fw_dump(bp);
3030
3031                 } else
3032                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3033         }
3034
3035         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3036                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3037                 if (attn & BNX2X_GRC_TIMEOUT) {
3038                         val = CHIP_IS_E1(bp) ? 0 :
3039                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3040                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3041                 }
3042                 if (attn & BNX2X_GRC_RSV) {
3043                         val = CHIP_IS_E1(bp) ? 0 :
3044                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3045                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3046                 }
3047                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3048         }
3049 }
3050
3051 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3052 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3053 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3054 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3055 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3056 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
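/* Sketch of the GEN_REG layout implied by the macros above (an
 * annotation, not part of the original source):
 *
 *   bits [15:0] - load counter, one count per loaded function
 *   bit  16     - "reset in progress" flag, set by
 *                 bnx2x_set_reset_in_progress() and cleared by
 *                 bnx2x_set_reset_done()
 *
 * e.g. a read of 0x00010002 means a recovery reset is in flight while
 * two functions are counted as loaded.
 */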
3057
3058 /*
3059  * should be run under rtnl lock
3060  */
3061 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3062 {
3063         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3064         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3065         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3066         barrier();
3067         mmiowb();
3068 }
3069
3070 /*
3071  * should be run under rtnl lock
3072  */
3073 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3074 {
3075         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076         val |= (1 << RESET_DONE_FLAG_SHIFT);
3077         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078         barrier();
3079         mmiowb();
3080 }
3081
3082 /*
3083  * should be run under rtnl lock
3084  */
3085 bool bnx2x_reset_is_done(struct bnx2x *bp)
3086 {
3087         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3088         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3089         return (val & RESET_DONE_FLAG_MASK) ? false : true;
3090 }
3091
3092 /*
3093  * should be run under rtnl lock
3094  */
3095 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3096 {
3097         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3098
3099         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3100
3101         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3102         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3103         barrier();
3104         mmiowb();
3105 }
3106
3107 /*
3108  * should be run under rtnl lock
3109  */
3110 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3111 {
3112         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3113
3114         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3115
3116         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3117         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3118         barrier();
3119         mmiowb();
3120
3121         return val1;
3122 }
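/* Annotation (not in the original source): the counter above tracks how
 * many functions currently have the driver loaded; presumably the caller
 * that decrements it to zero is the one that may perform the final,
 * chip-global teardown during recovery.
 */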
3123
3124 /*
3125  * should be run under rtnl lock
3126  */
3127 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3128 {
3129         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3130 }
3131
3132 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3133 {
3134         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3135         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3136 }
3137
3138 static inline void _print_next_block(int idx, const char *blk)
3139 {
3140         if (idx)
3141                 pr_cont(", ");
3142         pr_cont("%s", blk);
3143 }
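/* Annotation (not in the original source): par_num threads through the
 * parity printers below so the separator logic works across all four
 * signature words.  A hypothetical sequence of calls
 *
 *   _print_next_block(0, "BRB");
 *   _print_next_block(1, "PARSER");
 *   _print_next_block(2, "TSDM");
 *
 * continues the current log line as "BRB, PARSER, TSDM".
 */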
3144
3145 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3146 {
3147         int i = 0;
3148         u32 cur_bit = 0;
3149         for (i = 0; sig; i++) {
3150                 cur_bit = ((u32)0x1 << i);
3151                 if (sig & cur_bit) {
3152                         switch (cur_bit) {
3153                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3154                                 _print_next_block(par_num++, "BRB");
3155                                 break;
3156                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3157                                 _print_next_block(par_num++, "PARSER");
3158                                 break;
3159                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3160                                 _print_next_block(par_num++, "TSDM");
3161                                 break;
3162                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3163                                 _print_next_block(par_num++, "SEARCHER");
3164                                 break;
3165                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3166                                 _print_next_block(par_num++, "TSEMI");
3167                                 break;
3168                         }
3169
3170                         /* Clear the bit */
3171                         sig &= ~cur_bit;
3172                 }
3173         }
3174
3175         return par_num;
3176 }
3177
3178 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3179 {
3180         int i = 0;
3181         u32 cur_bit = 0;
3182         for (i = 0; sig; i++) {
3183                 cur_bit = ((u32)0x1 << i);
3184                 if (sig & cur_bit) {
3185                         switch (cur_bit) {
3186                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3187                                 _print_next_block(par_num++, "PBCLIENT");
3188                                 break;
3189                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3190                                 _print_next_block(par_num++, "QM");
3191                                 break;
3192                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3193                                 _print_next_block(par_num++, "XSDM");
3194                                 break;
3195                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3196                                 _print_next_block(par_num++, "XSEMI");
3197                                 break;
3198                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3199                                 _print_next_block(par_num++, "DOORBELLQ");
3200                                 break;
3201                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3202                                 _print_next_block(par_num++, "VAUX PCI CORE");
3203                                 break;
3204                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3205                                 _print_next_block(par_num++, "DEBUG");
3206                                 break;
3207                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3208                                 _print_next_block(par_num++, "USDM");
3209                                 break;
3210                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3211                                 _print_next_block(par_num++, "USEMI");
3212                                 break;
3213                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3214                                 _print_next_block(par_num++, "UPB");
3215                                 break;
3216                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3217                                 _print_next_block(par_num++, "CSDM");
3218                                 break;
3219                         }
3220
3221                         /* Clear the bit */
3222                         sig &= ~cur_bit;
3223                 }
3224         }
3225
3226         return par_num;
3227 }
3228
3229 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3230 {
3231         int i = 0;
3232         u32 cur_bit = 0;
3233         for (i = 0; sig; i++) {
3234                 cur_bit = ((u32)0x1 << i);
3235                 if (sig & cur_bit) {
3236                         switch (cur_bit) {
3237                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3238                                 _print_next_block(par_num++, "CSEMI");
3239                                 break;
3240                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3241                                 _print_next_block(par_num++, "PXP");
3242                                 break;
3243                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3244                                 _print_next_block(par_num++,
3245                                         "PXPPCICLOCKCLIENT");
3246                                 break;
3247                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3248                                 _print_next_block(par_num++, "CFC");
3249                                 break;
3250                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3251                                 _print_next_block(par_num++, "CDU");
3252                                 break;
3253                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3254                                 _print_next_block(par_num++, "IGU");
3255                                 break;
3256                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3257                                 _print_next_block(par_num++, "MISC");
3258                                 break;
3259                         }
3260
3261                         /* Clear the bit */
3262                         sig &= ~cur_bit;
3263                 }
3264         }
3265
3266         return par_num;
3267 }
3268
3269 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3270 {
3271         int i = 0;
3272         u32 cur_bit = 0;
3273         for (i = 0; sig; i++) {
3274                 cur_bit = ((u32)0x1 << i);
3275                 if (sig & cur_bit) {
3276                         switch (cur_bit) {
3277                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3278                                 _print_next_block(par_num++, "MCP ROM");
3279                                 break;
3280                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3281                                 _print_next_block(par_num++, "MCP UMP RX");
3282                                 break;
3283                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3284                                 _print_next_block(par_num++, "MCP UMP TX");
3285                                 break;
3286                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3287                                 _print_next_block(par_num++, "MCP SCPAD");
3288                                 break;
3289                         }
3290
3291                         /* Clear the bit */
3292                         sig &= ~cur_bit;
3293                 }
3294         }
3295
3296         return par_num;
3297 }
3298
3299 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3300                                      u32 sig2, u32 sig3)
3301 {
3302         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3303             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3304                 int par_num = 0;
3305                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3306                         "[0]:0x%08x [1]:0x%08x "
3307                         "[2]:0x%08x [3]:0x%08x\n",
3308                           sig0 & HW_PRTY_ASSERT_SET_0,
3309                           sig1 & HW_PRTY_ASSERT_SET_1,
3310                           sig2 & HW_PRTY_ASSERT_SET_2,
3311                           sig3 & HW_PRTY_ASSERT_SET_3);
3312                 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3313                        bp->dev->name);
3314                 par_num = bnx2x_print_blocks_with_parity0(
3315                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3316                 par_num = bnx2x_print_blocks_with_parity1(
3317                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3318                 par_num = bnx2x_print_blocks_with_parity2(
3319                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3320                 par_num = bnx2x_print_blocks_with_parity3(
3321                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3322                 printk("\n");
3323                 return true;
3324         } else
3325                 return false;
3326 }
3327
3328 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3329 {
3330         struct attn_route attn;
3331         int port = BP_PORT(bp);
3332
3333         attn.sig[0] = REG_RD(bp,
3334                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3335                              port*4);
3336         attn.sig[1] = REG_RD(bp,
3337                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3338                              port*4);
3339         attn.sig[2] = REG_RD(bp,
3340                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3341                              port*4);
3342         attn.sig[3] = REG_RD(bp,
3343                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3344                              port*4);
3345
3346         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3347                                         attn.sig[3]);
3348 }
3349
3350
3351 static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3352 {
3353         u32 val;
3354         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3355
3356                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3357                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3358                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3359                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3360                                   "ADDRESS_ERROR\n");
3361                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3362                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3363                                   "INCORRECT_RCV_BEHAVIOR\n");
3364                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3365                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3366                                   "WAS_ERROR_ATTN\n");
3367                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3368                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3369                                   "VF_LENGTH_VIOLATION_ATTN\n");
3370                 if (val &
3371                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3372                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373                                   "VF_GRC_SPACE_VIOLATION_ATTN\n");
3374                 if (val &
3375                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3376                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3377                                   "VF_MSIX_BAR_VIOLATION_ATTN\n");
3378                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3379                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3380                                   "TCPL_ERROR_ATTN\n");
3381                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3382                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383                                   "TCPL_IN_TWO_RCBS_ATTN\n");
3384                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3385                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386                                   "CSSNOOP_FIFO_OVERFLOW\n");
3387         }
3388         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3389                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3390                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3391                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3392                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3393                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3394                         BNX2X_ERR("ATC_ATC_INT_STS_REG"
3395                                   "_ATC_TCPL_TO_NOT_PEND\n");
3396                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3397                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3398                                   "ATC_GPA_MULTIPLE_HITS\n");
3399                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3400                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3401                                   "ATC_RCPL_TO_EMPTY_CNT\n");
3402                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3403                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3404                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3405                         BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3406                                   "ATC_IREQ_LESS_THAN_STU\n");
3407         }
3408
3409         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3410                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3411                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3412                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3413                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3414         }
3415
3416 }
3417
3418 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3419 {
3420         struct attn_route attn, *group_mask;
3421         int port = BP_PORT(bp);
3422         int index;
3423         u32 reg_addr;
3424         u32 val;
3425         u32 aeu_mask;
3426
3427         /* need to take HW lock because MCP or other port might also
3428            try to handle this event */
3429         bnx2x_acquire_alr(bp);
3430
3431         if (bnx2x_chk_parity_attn(bp)) {
3432                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3433                 bnx2x_set_reset_in_progress(bp);
3434                 schedule_delayed_work(&bp->reset_task, 0);
3435                 /* Disable HW interrupts */
3436                 bnx2x_int_disable(bp);
3437                 bnx2x_release_alr(bp);
3438                 /* In case of parity errors don't handle attentions so that
3439                  * the other function can also "see" the parity errors.
3440                  */
3441                 return;
3442         }
3443
3444         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3445         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3446         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3447         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3448         if (CHIP_IS_E2(bp))
3449                 attn.sig[4] =
3450                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3451         else
3452                 attn.sig[4] = 0;
3453
3454         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3455            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
3456
3457         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3458                 if (deasserted & (1 << index)) {
3459                         group_mask = &bp->attn_group[index];
3460
3461                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3462                                          "%08x %08x %08x\n",
3463                            index,
3464                            group_mask->sig[0], group_mask->sig[1],
3465                            group_mask->sig[2], group_mask->sig[3],
3466                            group_mask->sig[4]);
3467
3468                         bnx2x_attn_int_deasserted4(bp,
3469                                         attn.sig[4] & group_mask->sig[4]);
3470                         bnx2x_attn_int_deasserted3(bp,
3471                                         attn.sig[3] & group_mask->sig[3]);
3472                         bnx2x_attn_int_deasserted1(bp,
3473                                         attn.sig[1] & group_mask->sig[1]);
3474                         bnx2x_attn_int_deasserted2(bp,
3475                                         attn.sig[2] & group_mask->sig[2]);
3476                         bnx2x_attn_int_deasserted0(bp,
3477                                         attn.sig[0] & group_mask->sig[0]);
3478                 }
3479         }
3480
3481         bnx2x_release_alr(bp);
3482
3483         if (bp->common.int_block == INT_BLOCK_HC)
3484                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3485                             COMMAND_REG_ATTN_BITS_CLR);
3486         else
3487                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
3488
3489         val = ~deasserted;
3490         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3491            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3492         REG_WR(bp, reg_addr, val);
3493
3494         if (~bp->attn_state & deasserted)
3495                 BNX2X_ERR("IGU ERROR\n");
3496
3497         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3498                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3499
3500         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3501         aeu_mask = REG_RD(bp, reg_addr);
3502
3503         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3504            aeu_mask, deasserted);
3505         aeu_mask |= (deasserted & 0x3ff);
3506         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3507
3508         REG_WR(bp, reg_addr, aeu_mask);
3509         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3510
3511         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3512         bp->attn_state &= ~deasserted;
3513         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3514 }
3515
3516 static void bnx2x_attn_int(struct bnx2x *bp)
3517 {
3518         /* read local copy of bits */
3519         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3520                                                                 attn_bits);
3521         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3522                                                                 attn_bits_ack);
3523         u32 attn_state = bp->attn_state;
3524
3525         /* look for changed bits */
3526         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3527         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3528
3529         DP(NETIF_MSG_HW,
3530            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3531            attn_bits, attn_ack, asserted, deasserted);
3532
3533         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3534                 BNX2X_ERR("BAD attention state\n");
3535
3536         /* handle bits that were raised */
3537         if (asserted)
3538                 bnx2x_attn_int_asserted(bp, asserted);
3539
3540         if (deasserted)
3541                 bnx2x_attn_int_deasserted(bp, deasserted);
3542 }
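/* Worked example of the attention bit algebra above (an annotation, not
 * part of the original source).  For a single attention line:
 *
 *   bits ack state -> outcome
 *    1    0    0      newly asserted, bnx2x_attn_int_asserted() runs
 *    0    1    1      newly deasserted, bnx2x_attn_int_deasserted() runs
 *    1    1    1      stable asserted, nothing to do (same for 0 0 0)
 *    0    0    1      inconsistent -> "BAD attention state" (also 1 1 0)
 *    1    0    1      transitional, ignored until the ack catches up
 */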
3543
3544 static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3545 {
3546         /* No memory barriers */
3547         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3548         mmiowb(); /* keep prod updates ordered */
3549 }
3550
3551 #ifdef BCM_CNIC
3552 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3553                                       union event_ring_elem *elem)
3554 {
3555         if (!bp->cnic_eth_dev.starting_cid  ||
3556             cid < bp->cnic_eth_dev.starting_cid)
3557                 return 1;
3558
3559         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3560
3561         if (unlikely(elem->message.data.cfc_del_event.error)) {
3562                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3563                           cid);
3564                 bnx2x_panic_dump(bp);
3565         }
3566         bnx2x_cnic_cfc_comp(bp, cid);
3567         return 0;
3568 }
3569 #endif
3570
3571 static void bnx2x_eq_int(struct bnx2x *bp)
3572 {
3573         u16 hw_cons, sw_cons, sw_prod;
3574         union event_ring_elem *elem;
3575         u32 cid;
3576         u8 opcode;
3577         int spqe_cnt = 0;
3578
3579         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3580
3581         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3582          * When hw_cons lands on the next-page element we need to adjust it
3583          * so the loop condition below will be met. The next-page element is
3584          * the size of a regular element, hence we increment by 1.
3585          */
3586         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3587                 hw_cons++;
3588
3589         /* This function may never run in parallel with itself for a
3590          * specific bp, thus there is no need for a "paired" read memory
3591          * barrier here.
3592          */
3593         sw_cons = bp->eq_cons;
3594         sw_prod = bp->eq_prod;
3595
3596         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
3597                         hw_cons, sw_cons, atomic_read(&bp->spq_left));
3598
3599         for (; sw_cons != hw_cons;
3600               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3601
3602
3603                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3604
3605                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3606                 opcode = elem->message.opcode;
3607
3608
3609                 /* handle eq element */
3610                 switch (opcode) {
3611                 case EVENT_RING_OPCODE_STAT_QUERY:
3612                         DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3613                         /* nothing to do with stats comp */
3614                         continue;
3615
3616                 case EVENT_RING_OPCODE_CFC_DEL:
3617                         /* handle according to cid range */
3618                         /*
3619                          * we may want to verify here that the bp state is
3620                          * HALTING
3621                          */
3622                         DP(NETIF_MSG_IFDOWN,
3623                            "got delete ramrod for MULTI[%d]\n", cid);
3624 #ifdef BCM_CNIC
3625                         if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3626                                 goto next_spqe;
3627 #endif
3628                         bnx2x_fp(bp, cid, state) =
3629                                                 BNX2X_FP_STATE_CLOSED;
3630
3631                         goto next_spqe;
3632                 }
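                /* Annotation (not in the original source): the switch below
                 * keys on (opcode | bp->state).  This relies on the
                 * BNX2X_STATE_* values occupying high bits while event-ring
                 * opcodes are small integers, so the OR yields a unique
                 * (opcode, state) pair per case label.
                 */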
3633
3634                 switch (opcode | bp->state) {
3635                 case (EVENT_RING_OPCODE_FUNCTION_START |
3636                       BNX2X_STATE_OPENING_WAIT4_PORT):
3637                         DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3638                         bp->state = BNX2X_STATE_FUNC_STARTED;
3639                         break;
3640
3641                 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3642                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3643                         DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3644                         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3645                         break;
3646
3647                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3648                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3649                         DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3650                         bp->set_mac_pending = 0;
3651                         break;
3652
3653                 case (EVENT_RING_OPCODE_SET_MAC |
3654                       BNX2X_STATE_CLOSING_WAIT4_HALT):
3655                         DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3656                         bp->set_mac_pending = 0;
3657                         break;
3658                 default:
3659                         /* unknown event: log an error and continue */
3660                         BNX2X_ERR("Unknown EQ event %d\n",
3661                                   elem->message.opcode);
3662                 }
3663 next_spqe:
3664                 spqe_cnt++;
3665         } /* for */
3666
3667         smp_mb__before_atomic_inc();
3668         atomic_add(spqe_cnt, &bp->spq_left);
3669
3670         bp->eq_cons = sw_cons;
3671         bp->eq_prod = sw_prod;
3672         /* Make sure the above memory writes complete before the producer update */
3673         smp_wmb();
3674
3675         /* update producer */
3676         bnx2x_update_eq_prod(bp, bp->eq_prod);
3677 }
3678
3679 static void bnx2x_sp_task(struct work_struct *work)
3680 {
3681         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3682         u16 status;
3683
3684         /* Return here if interrupt is disabled */
3685         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3686                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3687                 return;
3688         }
3689
3690         status = bnx2x_update_dsb_idx(bp);
3691 /*      if (status == 0)                                     */
3692 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3693
3694         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3695
3696         /* HW attentions */
3697         if (status & BNX2X_DEF_SB_ATT_IDX) {
3698                 bnx2x_attn_int(bp);
3699                 status &= ~BNX2X_DEF_SB_ATT_IDX;
3700         }
3701
3702         /* SP events: STAT_QUERY and others */
3703         if (status & BNX2X_DEF_SB_IDX) {
3704
3705                 /* Handle EQ completions */
3706                 bnx2x_eq_int(bp);
3707
3708                 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3709                         le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3710
3711                 status &= ~BNX2X_DEF_SB_IDX;
3712         }
3713
3714         if (unlikely(status))
3715                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3716                    status);
3717
3718         bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3719              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
3720 }
3721
3722 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3723 {
3724         struct net_device *dev = dev_instance;
3725         struct bnx2x *bp = netdev_priv(dev);
3726
3727         /* Return here if interrupt is disabled */
3728         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3729                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3730                 return IRQ_HANDLED;
3731         }
3732
3733         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3734                      IGU_INT_DISABLE, 0);
3735
3736 #ifdef BNX2X_STOP_ON_ERROR
3737         if (unlikely(bp->panic))
3738                 return IRQ_HANDLED;
3739 #endif
3740
3741 #ifdef BCM_CNIC
3742         {
3743                 struct cnic_ops *c_ops;
3744
3745                 rcu_read_lock();
3746                 c_ops = rcu_dereference(bp->cnic_ops);
3747                 if (c_ops)
3748                         c_ops->cnic_handler(bp->cnic_data, NULL);
3749                 rcu_read_unlock();
3750         }
3751 #endif
3752         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3753
3754         return IRQ_HANDLED;
3755 }
3756
3757 /* end of slow path */
3758
3759 static void bnx2x_timer(unsigned long data)
3760 {
3761         struct bnx2x *bp = (struct bnx2x *) data;
3762
3763         if (!netif_running(bp->dev))
3764                 return;
3765
3766         if (atomic_read(&bp->intr_sem) != 0)
3767                 goto timer_restart;
3768
3769         if (poll) {
3770                 struct bnx2x_fastpath *fp = &bp->fp[0];
3771                 int rc;
3772
3773                 bnx2x_tx_int(fp);
3774                 rc = bnx2x_rx_int(fp, 1000);
3775         }
3776
3777         if (!BP_NOMCP(bp)) {
3778                 int mb_idx = BP_FW_MB_IDX(bp);
3779                 u32 drv_pulse;
3780                 u32 mcp_pulse;
3781
3782                 ++bp->fw_drv_pulse_wr_seq;
3783                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3784                 /* TBD - add SYSTEM_TIME */
3785                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3786                 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
3787
3788                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
3789                              MCP_PULSE_SEQ_MASK);
3790                 /* The delta between driver pulse and mcp response
3791                  * should be 1 (before mcp response) or 0 (after mcp response)
3792                  */
3793                 if ((drv_pulse != mcp_pulse) &&
3794                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3795                         /* someone lost a heartbeat... */
3796                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3797                                   drv_pulse, mcp_pulse);
3798                 }
3799         }
3800
3801         if (bp->state == BNX2X_STATE_OPEN)
3802                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3803
3804 timer_restart:
3805         mod_timer(&bp->timer, jiffies + bp->current_interval);
3806 }
3807
3808 /* end of Statistics */
3809
3810 /* nic init */
3811
3812 /*
3813  * nic init service functions
3814  */
3815
3816 static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
3817 {
3818         u32 i;
3819         if (!(len%4) && !(addr%4))
3820                 for (i = 0; i < len; i += 4)
3821                         REG_WR(bp, addr + i, fill);
3822         else
3823                 for (i = 0; i < len; i++)
3824                         REG_WR8(bp, addr + i, fill);
3825
3826 }
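/* Annotation (not in the original source): bnx2x_fill() takes the dword
 * fast path only when both addr and len are 4-byte aligned, otherwise it
 * falls back to byte-wide writes.  A typical use appears below, e.g.:
 *
 *   bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
 *                  CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
 *                  CSTORM_STATUS_BLOCK_SIZE);
 */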
3827
3828 /* helper: writes FP SB data to FW - data_size in dwords */
3829 static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3830                                        int fw_sb_id,
3831                                        u32 *sb_data_p,
3832                                        u32 data_size)
3833 {
3834         int index;
3835         for (index = 0; index < data_size; index++)
3836                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3837                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3838                         sizeof(u32)*index,
3839                         *(sb_data_p + index));
3840 }
3841
3842 static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3843 {
3844         u32 *sb_data_p;
3845         u32 data_size = 0;
3846         struct hc_status_block_data_e2 sb_data_e2;
3847         struct hc_status_block_data_e1x sb_data_e1x;
3848
3849         /* disable the function first */
3850         if (CHIP_IS_E2(bp)) {
3851                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3852                 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3853                 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3854                 sb_data_e2.common.p_func.vf_valid = false;
3855                 sb_data_p = (u32 *)&sb_data_e2;
3856                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3857         } else {
3858                 memset(&sb_data_e1x, 0,
3859                        sizeof(struct hc_status_block_data_e1x));
3860                 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3861                 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3862                 sb_data_e1x.common.p_func.vf_valid = false;
3863                 sb_data_p = (u32 *)&sb_data_e1x;
3864                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3865         }
3866         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3867
3868         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3869                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3870                         CSTORM_STATUS_BLOCK_SIZE);
3871         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3872                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3873                         CSTORM_SYNC_BLOCK_SIZE);
3874 }
3875
3876 /* helper: writes SP SB data to FW */
3877 static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3878                 struct hc_sp_status_block_data *sp_sb_data)
3879 {
3880         int func = BP_FUNC(bp);
3881         int i;
3882         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3883                 REG_WR(bp, BAR_CSTRORM_INTMEM +
3884                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3885                         i*sizeof(u32),
3886                         *((u32 *)sp_sb_data + i));
3887 }
3888
3889 static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3890 {
3891         int func = BP_FUNC(bp);
3892         struct hc_sp_status_block_data sp_sb_data;
3893         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3894
3895         sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3896         sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3897         sp_sb_data.p_func.vf_valid = false;
3898
3899         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3900
3901         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3902                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3903                         CSTORM_SP_STATUS_BLOCK_SIZE);
3904         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3905                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3906                         CSTORM_SP_SYNC_BLOCK_SIZE);
3907
3908 }
3909
3910
3911 static inline
3912 void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3913                                            int igu_sb_id, int igu_seg_id)
3914 {
3915         hc_sm->igu_sb_id = igu_sb_id;
3916         hc_sm->igu_seg_id = igu_seg_id;
3917         hc_sm->timer_value = 0xFF;
3918         hc_sm->time_to_expire = 0xFFFFFFFF;
3919 }
3920
3921 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3922                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
3923 {
3924         int igu_seg_id;
3925
3926         struct hc_status_block_data_e2 sb_data_e2;
3927         struct hc_status_block_data_e1x sb_data_e1x;
3928         struct hc_status_block_sm  *hc_sm_p;
3929         struct hc_index_data *hc_index_p;
3930         int data_size;
3931         u32 *sb_data_p;
3932
3933         if (CHIP_INT_MODE_IS_BC(bp))
3934                 igu_seg_id = HC_SEG_ACCESS_NORM;
3935         else
3936                 igu_seg_id = IGU_SEG_ACCESS_NORM;
3937
3938         bnx2x_zero_fp_sb(bp, fw_sb_id);
3939
3940         if (CHIP_IS_E2(bp)) {
3941                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3942                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3943                 sb_data_e2.common.p_func.vf_id = vfid;
3944                 sb_data_e2.common.p_func.vf_valid = vf_valid;
3945                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3946                 sb_data_e2.common.same_igu_sb_1b = true;
3947                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3948                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3949                 hc_sm_p = sb_data_e2.common.state_machine;
3950                 hc_index_p = sb_data_e2.index_data;
3951                 sb_data_p = (u32 *)&sb_data_e2;
3952                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3953         } else {
3954                 memset(&sb_data_e1x, 0,
3955                        sizeof(struct hc_status_block_data_e1x));
3956                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3957                 sb_data_e1x.common.p_func.vf_id = 0xff;
3958                 sb_data_e1x.common.p_func.vf_valid = false;
3959                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3960                 sb_data_e1x.common.same_igu_sb_1b = true;
3961                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3962                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3963                 hc_sm_p = sb_data_e1x.common.state_machine;
3964                 hc_index_p = sb_data_e1x.index_data;
3965                 sb_data_p = (u32 *)&sb_data_e1x;
3966                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3967         }
3968
3969         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3970                                        igu_sb_id, igu_seg_id);
3971         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3972                                        igu_sb_id, igu_seg_id);
3973
3974         DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3975
3976         /* write indices to HW */
3977         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3978 }
3979
3980 static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3981                                         u8 sb_index, u8 disable, u16 usec)
3982 {
3983         int port = BP_PORT(bp);
3984         u8 ticks = usec / BNX2X_BTR;
3985
3986         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3987
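        /* Annotation (not in the original source): resolve the effective
         * disable value -
         *   disable != 0             -> force disabled
         *   disable == 0, usec == 0  -> disabled (a zero timeout means off)
         *   disable == 0, usec != 0  -> enabled
         */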
3988         disable = disable ? 1 : (usec ? 0 : 1);
3989         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3990 }
3991
3992 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3993                                      u16 tx_usec, u16 rx_usec)
3994 {
3995         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3996                                     false, rx_usec);
3997         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3998                                     false, tx_usec);
3999 }
4000
4001 static void bnx2x_init_def_sb(struct bnx2x *bp)
4002 {
4003         struct host_sp_status_block *def_sb = bp->def_status_blk;
4004         dma_addr_t mapping = bp->def_status_blk_mapping;
4005         int igu_sp_sb_index;
4006         int igu_seg_id;
4007         int port = BP_PORT(bp);
4008         int func = BP_FUNC(bp);
4009         int reg_offset;
4010         u64 section;
4011         int index;
4012         struct hc_sp_status_block_data sp_sb_data;
4013         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4014
4015         if (CHIP_INT_MODE_IS_BC(bp)) {
4016                 igu_sp_sb_index = DEF_SB_IGU_ID;
4017                 igu_seg_id = HC_SEG_ACCESS_DEF;
4018         } else {
4019                 igu_sp_sb_index = bp->igu_dsb_id;
4020                 igu_seg_id = IGU_SEG_ACCESS_DEF;
4021         }
4022
4023         /* ATTN */
4024         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4025                                             atten_status_block);
4026         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
4027
4028         bp->attn_state = 0;
4029
4030         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4031                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4032         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4033                 int sindex;
4034                 /* take care of sig[0]..sig[3]; sig[4] is read below */
4035                 for (sindex = 0; sindex < 4; sindex++)
4036                         bp->attn_group[index].sig[sindex] =
4037                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
4038
4039                 if (CHIP_IS_E2(bp))
4040                         /*
4041                          * enable5 is separate from the rest of the registers,
4042                          * and therefore the address skip is 4
4043                          * and not 16 between the different groups
4044                          */
4045                         bp->attn_group[index].sig[4] = REG_RD(bp,
4046                                         reg_offset + 0x10 + 0x4*index);
4047                 else
4048                         bp->attn_group[index].sig[4] = 0;
4049         }
4050
4051         if (bp->common.int_block == INT_BLOCK_HC) {
4052                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4053                                      HC_REG_ATTN_MSG0_ADDR_L);
4054
4055                 REG_WR(bp, reg_offset, U64_LO(section));
4056                 REG_WR(bp, reg_offset + 4, U64_HI(section));
4057         } else if (CHIP_IS_E2(bp)) {
4058                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4059                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4060         }
4061
4062         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4063                                             sp_sb);
4064
4065         bnx2x_zero_sp_sb(bp);
4066
4067         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
4068         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
4069         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
4070         sp_sb_data.igu_seg_id           = igu_seg_id;
4071         sp_sb_data.p_func.pf_id         = func;
4072         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
4073         sp_sb_data.p_func.vf_id         = 0xff;
4074
4075         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4076
4077         bp->stats_pending = 0;
4078         bp->set_mac_pending = 0;
4079
4080         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
4081 }
4082
4083 void bnx2x_update_coalesce(struct bnx2x *bp)
4084 {
4085         int i;
4086
4087         for_each_queue(bp, i)
4088                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4089                                          bp->rx_ticks, bp->tx_ticks);
4090 }
4091
4092 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4093 {
4094         spin_lock_init(&bp->spq_lock);
4095         atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
4096
4097         bp->spq_prod_idx = 0;
4098         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4099         bp->spq_prod_bd = bp->spq;
4100         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4101 }
4102
4103 static void bnx2x_init_eq_ring(struct bnx2x *bp)
4104 {
4105         int i;
4106         for (i = 1; i <= NUM_EQ_PAGES; i++) {
4107                 union event_ring_elem *elem =
4108                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
4109
4110                 elem->next_page.addr.hi =
4111                         cpu_to_le32(U64_HI(bp->eq_mapping +
4112                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4113                 elem->next_page.addr.lo =
4114                         cpu_to_le32(U64_LO(bp->eq_mapping +
4115                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
4116         }
4117         bp->eq_cons = 0;
4118         bp->eq_prod = NUM_EQ_DESC;
4119         bp->eq_cons_sb = BNX2X_EQ_INDEX;
4120 }
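/* Annotation (not in the original source): the loop above chains the
 * event-ring pages into a circle.  With a hypothetical NUM_EQ_PAGES == 2,
 * the last element of page 0 points at page 1 and the last element of
 * page 1 wraps back to page 0 via the (i % NUM_EQ_PAGES) term.
 */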
4121
4122 static void bnx2x_init_ind_table(struct bnx2x *bp)
4123 {
4124         int func = BP_FUNC(bp);
4125         int i;
4126
4127         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4128                 return;
4129
4130         DP(NETIF_MSG_IFUP,
4131            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4132         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4133                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4134                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4135                         bp->fp->cl_id + (i % bp->num_queues));
4136 }
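
/*
 * A minimal sketch (not part of the driver) of the round-robin fill
 * used above: with, say, num_queues = 4 and cl_id = 0 the table reads
 * 0,1,2,3,0,1,2,3,... so RSS hash buckets spread evenly over the
 * client ids.  The helper name and parameters are illustrative
 * assumptions.
 */
#if 0
static void fill_ind_table_sketch(u8 *table, int size, u8 cl_id, int nq)
{
        int i;

        for (i = 0; i < size; i++)
                table[i] = cl_id + (i % nq);    /* same rule as above */
}
#endif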
4137
4138 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4139 {
4140         int mode = bp->rx_mode;
4141         u16 cl_id;
4142
4143         /* All but management unicast packets should pass to the host as well */
4144         u32 llh_mask =
4145                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4146                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4147                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4148                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4149
4150         switch (mode) {
4151         case BNX2X_RX_MODE_NONE: /* no Rx */
4152                 cl_id = BP_L_ID(bp);
4153                 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4154                 break;
4155
4156         case BNX2X_RX_MODE_NORMAL:
4157                 cl_id = BP_L_ID(bp);
4158                 bnx2x_rxq_set_mac_filters(bp, cl_id,
4159                         BNX2X_ACCEPT_UNICAST |
4160                         BNX2X_ACCEPT_BROADCAST |
4161                         BNX2X_ACCEPT_MULTICAST);
4162                 break;
4163
4164         case BNX2X_RX_MODE_ALLMULTI:
4165                 cl_id = BP_L_ID(bp);
4166                 bnx2x_rxq_set_mac_filters(bp, cl_id,
4167                         BNX2X_ACCEPT_UNICAST |
4168                         BNX2X_ACCEPT_BROADCAST |
4169                         BNX2X_ACCEPT_ALL_MULTICAST);
4170                 break;
4171
4172         case BNX2X_RX_MODE_PROMISC:
4173                 cl_id = BP_L_ID(bp);
4174                 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4175
4176                 /* pass management unicast packets as well */
4177                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4178                 break;
4179
4180         default:
4181                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4182                 break;
4183         }
4184
4185         REG_WR(bp,
4186                BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4187                              NIG_REG_LLH0_BRB1_DRV_MASK,
4188                llh_mask);
4189
4190         DP(NETIF_MSG_IFUP, "rx mode %d\n"
4191                 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4192                 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4193                 bp->mac_filters.ucast_drop_all,
4194                 bp->mac_filters.mcast_drop_all,
4195                 bp->mac_filters.bcast_drop_all,
4196                 bp->mac_filters.ucast_accept_all,
4197                 bp->mac_filters.mcast_accept_all,
4198                 bp->mac_filters.bcast_accept_all
4199         );
4200
4201         storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
4202 }
4203
4204 static void bnx2x_init_internal_common(struct bnx2x *bp)
4205 {
4206         int i;
4207
4208         if (!CHIP_IS_E1(bp)) {
4209
4210                 /* xstorm needs to know whether to add ovlan to packets
4211                  * or not; in switch-independent mode we write 0 here */
4212                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4213                         bp->mf_mode);
4214                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4215                         bp->mf_mode);
4216                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4217                         bp->mf_mode);
4218                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4219                         bp->mf_mode);
4220         }
4221
4222         /* Zero this manually as its initialization is
4223            currently missing in the initTool */
4224         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4225                 REG_WR(bp, BAR_USTRORM_INTMEM +
4226                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4227         if (CHIP_IS_E2(bp)) {
4228                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4229                         CHIP_INT_MODE_IS_BC(bp) ?
4230                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4231         }
4232 }
4233
4234 static void bnx2x_init_internal_port(struct bnx2x *bp)
4235 {
4236         /* port */
4237 }
4238
4239 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4240 {
4241         switch (load_code) {
4242         case FW_MSG_CODE_DRV_LOAD_COMMON:
4243         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4244                 bnx2x_init_internal_common(bp);
4245                 /* no break */
4246
4247         case FW_MSG_CODE_DRV_LOAD_PORT:
4248                 bnx2x_init_internal_port(bp);
4249                 /* no break */
4250
4251         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4252                 /* internal memory per function is
4253                    initialized inside bnx2x_pf_init */
4254                 break;
4255
4256         default:
4257                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4258                 break;
4259         }
4260 }
4261
4262 static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4263 {
4264         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4265
4266         fp->state = BNX2X_FP_STATE_CLOSED;
4267
4268         fp->index = fp->cid = fp_idx;
4269         fp->cl_id = BP_L_ID(bp) + fp_idx;
4270         fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4271         fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4272         /* qZone id equals the FW (per path) client id */
4273         fp->cl_qzone_id  = fp->cl_id +
4274                            BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4275                                 ETH_MAX_RX_CLIENTS_E1H);
4276         /* init shortcut */
4277         fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4278                             USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4279                             USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4280         /* Set up SB indices */
4281         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4282         fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4283
4284         DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
4285                                    "cl_id %d  fw_sb %d  igu_sb %d\n",
4286                    fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4287                    fp->igu_sb_id);
4288         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4289                       fp->fw_sb_id, fp->igu_sb_id);
4290
4291         bnx2x_update_fpsb_idx(fp);
4292 }
4293
4294 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4295 {
4296         int i;
4297
4298         for_each_queue(bp, i)
4299                 bnx2x_init_fp_sb(bp, i);
4300 #ifdef BCM_CNIC
4301
4302         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4303                       BNX2X_VF_ID_INVALID, false,
4304                       CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4305
4306 #endif
4307
4308         /* ensure status block indices were read */
4309         rmb();
4310
4311         bnx2x_init_def_sb(bp);
4312         bnx2x_update_dsb_idx(bp);
4313         bnx2x_init_rx_rings(bp);
4314         bnx2x_init_tx_rings(bp);
4315         bnx2x_init_sp_ring(bp);
4316         bnx2x_init_eq_ring(bp);
4317         bnx2x_init_internal(bp, load_code);
4318         bnx2x_pf_init(bp);
4319         bnx2x_init_ind_table(bp);
4320         bnx2x_stats_init(bp);
4321
4322         /* At this point, we are ready for interrupts */
4323         atomic_set(&bp->intr_sem, 0);
4324
4325         /* flush all before enabling interrupts */
4326         mb();
4327         mmiowb();
4328
4329         bnx2x_int_enable(bp);
4330
4331         /* Check for SPIO5 */
4332         bnx2x_attn_int_deasserted0(bp,
4333                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4334                                    AEU_INPUTS_ATTN_BITS_SPIO5);
4335 }
4336
4337 /* end of nic init */
4338
4339 /*
4340  * gzip service functions
4341  */
4342
4343 static int bnx2x_gunzip_init(struct bnx2x *bp)
4344 {
4345         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4346                                             &bp->gunzip_mapping, GFP_KERNEL);
4347         if (bp->gunzip_buf == NULL)
4348                 goto gunzip_nomem1;
4349
4350         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4351         if (bp->strm == NULL)
4352                 goto gunzip_nomem2;
4353
4354         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4355                                       GFP_KERNEL);
4356         if (bp->strm->workspace == NULL)
4357                 goto gunzip_nomem3;
4358
4359         return 0;
4360
4361 gunzip_nomem3:
4362         kfree(bp->strm);
4363         bp->strm = NULL;
4364
4365 gunzip_nomem2:
4366         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4367                           bp->gunzip_mapping);
4368         bp->gunzip_buf = NULL;
4369
4370 gunzip_nomem1:
4371         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4372                " decompression\n");
4373         return -ENOMEM;
4374 }
4375
4376 static void bnx2x_gunzip_end(struct bnx2x *bp)
4377 {
4378         kfree(bp->strm->workspace);
4379         kfree(bp->strm);
4380         bp->strm = NULL;
4381
4382         if (bp->gunzip_buf) {
4383                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4384                                   bp->gunzip_mapping);
4385                 bp->gunzip_buf = NULL;
4386         }
4387 }
4388
4389 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4390 {
4391         int n, rc;
4392
4393         /* check gzip header */
4394         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4395                 BNX2X_ERR("Bad gzip header\n");
4396                 return -EINVAL;
4397         }
4398
4399         n = 10;
4400
4401 #define FNAME                           0x8
4402
4403         if (zbuf[3] & FNAME)
4404                 while ((zbuf[n++] != 0) && (n < len));
4405
4406         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4407         bp->strm->avail_in = len - n;
4408         bp->strm->next_out = bp->gunzip_buf;
4409         bp->strm->avail_out = FW_BUF_SIZE;
4410
4411         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4412         if (rc != Z_OK)
4413                 return rc;
4414
4415         rc = zlib_inflate(bp->strm, Z_FINISH);
4416         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4417                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4418                            bp->strm->msg);
4419
4420         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4421         if (bp->gunzip_outlen & 0x3)
4422                 netdev_err(bp->dev, "Firmware decompression error:"
4423                                     " gunzip_outlen (%d) not aligned\n",
4424                                 bp->gunzip_outlen);
4425         bp->gunzip_outlen >>= 2;
4426
4427         zlib_inflateEnd(bp->strm);
4428
4429         if (rc == Z_STREAM_END)
4430                 return 0;
4431
4432         return rc;
4433 }
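
/*
 * A minimal user-space sketch (not part of the driver) of the same
 * decompression approach: validate the gzip magic, skip the 10-byte
 * header plus the NUL-terminated name when FLG.FNAME (0x8) is set,
 * then inflate with negative windowBits, which tells zlib the stream
 * is raw deflate with no zlib header/trailer.  Error handling is
 * trimmed for brevity.
 */
#if 0
#include <zlib.h>
#include <string.h>

static int gunzip_sketch(const unsigned char *zbuf, int len,
                         unsigned char *out, int out_len)
{
        z_stream strm;
        int n = 10, rc;

        if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b ||
            zbuf[2] != Z_DEFLATED)
                return -1;
        if (zbuf[3] & 0x8)                      /* FLG.FNAME */
                while (n < len && zbuf[n++] != 0)
                        ;
        memset(&strm, 0, sizeof(strm));
        strm.next_in = (unsigned char *)zbuf + n;
        strm.avail_in = len - n;
        strm.next_out = out;
        strm.avail_out = out_len;
        if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)    /* raw deflate */
                return -1;
        rc = inflate(&strm, Z_FINISH);
        inflateEnd(&strm);
        return (rc == Z_STREAM_END) ? (int)(out_len - strm.avail_out) : -1;
}
#endif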
4434
4435 /* nic load/unload */
4436
4437 /*
4438  * General service functions
4439  */
4440
4441 /* send a NIG loopback debug packet */
4442 static void bnx2x_lb_pckt(struct bnx2x *bp)
4443 {
4444         u32 wb_write[3];
4445
4446         /* Ethernet source and destination addresses */
4447         wb_write[0] = 0x55555555;
4448         wb_write[1] = 0x55555555;
4449         wb_write[2] = 0x20;             /* SOP */
4450         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4451
4452         /* NON-IP protocol */
4453         wb_write[0] = 0x09000000;
4454         wb_write[1] = 0x55555555;
4455         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4456         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4457 }
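
/*
 * Note: each debug packet above is written as two 32-bit data words
 * plus a control word (0x20 appears to mark SOP and 0x10 EOP, per the
 * comments), and is counted by the NIG as 0x10 octets - which is why
 * bnx2x_int_mem_test() below waits for 0x10 after a single packet and
 * for 11 * 0x10 = 0xb0 after the ten-packet burst (see the "10 + 1"
 * comment there).
 */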
4458
4459 /* Some of the internal memories are not directly
4460  * readable from the driver;
4461  * to test them we send debug packets.
4462  */
4463 static int bnx2x_int_mem_test(struct bnx2x *bp)
4464 {
4465         int factor;
4466         int count, i;
4467         u32 val = 0;
4468
4469         if (CHIP_REV_IS_FPGA(bp))
4470                 factor = 120;
4471         else if (CHIP_REV_IS_EMUL(bp))
4472                 factor = 200;
4473         else
4474                 factor = 1;
4475
4476         /* Disable inputs of parser neighbor blocks */
4477         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4478         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4479         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4480         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4481
4482         /*  Write 0 to parser credits for CFC search request */
4483         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4484
4485         /* send Ethernet packet */
4486         bnx2x_lb_pckt(bp);
4487
4488         /* TODO: should the NIG statistics be reset here? */
4489         /* Wait until NIG register shows 1 packet of size 0x10 */
4490         count = 1000 * factor;
4491         while (count) {
4492
4493                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4494                 val = *bnx2x_sp(bp, wb_data[0]);
4495                 if (val == 0x10)
4496                         break;
4497
4498                 msleep(10);
4499                 count--;
4500         }
4501         if (val != 0x10) {
4502                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4503                 return -1;
4504         }
4505
4506         /* Wait until PRS register shows 1 packet */
4507         count = 1000 * factor;
4508         while (count) {
4509                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4510                 if (val == 1)
4511                         break;
4512
4513                 msleep(10);
4514                 count--;
4515         }
4516         if (val != 0x1) {
4517                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4518                 return -2;
4519         }
4520
4521         /* Reset and init BRB, PRS */
4522         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4523         msleep(50);
4524         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4525         msleep(50);
4526         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4527         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4528
4529         DP(NETIF_MSG_HW, "part2\n");
4530
4531         /* Disable inputs of parser neighbor blocks */
4532         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4533         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4534         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4535         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4536
4537         /* Write 0 to parser credits for CFC search request */
4538         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4539
4540         /* send 10 Ethernet packets */
4541         for (i = 0; i < 10; i++)
4542                 bnx2x_lb_pckt(bp);
4543
4544         /* Wait until NIG register shows 10 + 1
4545            packets of size 11*0x10 = 0xb0 */
4546         count = 1000 * factor;
4547         while (count) {
4548
4549                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4550                 val = *bnx2x_sp(bp, wb_data[0]);
4551                 if (val == 0xb0)
4552                         break;
4553
4554                 msleep(10);
4555                 count--;
4556         }
4557         if (val != 0xb0) {
4558                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4559                 return -3;
4560         }
4561
4562         /* Wait until PRS register shows 2 packets */
4563         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4564         if (val != 2)
4565                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4566
4567         /* Write 1 to parser credits for CFC search request */
4568         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4569
4570         /* Wait until PRS register shows 3 packets */
4571         msleep(10 * factor);
4572         /* the parser should now have seen the third packet */
4573         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4574         if (val != 3)
4575                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4576
4577         /* clear NIG EOP FIFO */
4578         for (i = 0; i < 11; i++)
4579                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4580         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4581         if (val != 1) {
4582                 BNX2X_ERR("clear of NIG failed\n");
4583                 return -4;
4584         }
4585
4586         /* Reset and init BRB, PRS, NIG */
4587         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4588         msleep(50);
4589         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4590         msleep(50);
4591         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4592         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4593 #ifndef BCM_CNIC
4594         /* set NIC mode */
4595         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4596 #endif
4597
4598         /* Enable inputs of parser neighbor blocks */
4599         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4600         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4601         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4602         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
4603
4604         DP(NETIF_MSG_HW, "done\n");
4605
4606         return 0; /* OK */
4607 }
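
/*
 * The test above repeats one pattern: poll a counter register until it
 * reaches an expected value, scaling the iteration budget by "factor"
 * on slow FPGA/emulation platforms.  A minimal sketch of that pattern
 * (not part of the driver; the helper name is illustrative):
 */
#if 0
static int poll_reg_sketch(struct bnx2x *bp, u32 reg, u32 expected,
                           int count)
{
        u32 val;

        while (count--) {
                val = REG_RD(bp, reg);
                if (val == expected)
                        return 0;
                msleep(10);
        }
        return -ETIMEDOUT;      /* timed out waiting for 'expected' */
}
#endif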
4608
4609 static void enable_blocks_attention(struct bnx2x *bp)
4610 {
4611         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4612         if (CHIP_IS_E2(bp))
4613                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4614         else
4615                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4616         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4617         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4618         /*
4619          * mask read length error interrupts in brb for parser
4620          * (parsing unit and 'checksum and crc' unit)
4621          * these errors are legal (PU reads fixed length and CAC can cause
4622          * read length error on truncated packets)
4623          */
4624         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
4625         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4626         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4627         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4628         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4629         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
4630 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4631 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
4632         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4633         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4634         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
4635 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4636 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
4637         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4638         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4639         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4640         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
4641 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4642 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4643
4644         if (CHIP_REV_IS_FPGA(bp))
4645                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4646         else if (CHIP_IS_E2(bp))
4647                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4648                            (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4649                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4650                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4651                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4652                                 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
4653         else
4654                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
4655         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4656         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4657         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
4658 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4659 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
4660         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4661         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4662 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4663         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
4664 }
4665
4666 static const struct {
4667         u32 addr;
4668         u32 mask;
4669 } bnx2x_parity_mask[] = {
4670         {PXP_REG_PXP_PRTY_MASK,         0x3ffffff},
4671         {PXP2_REG_PXP2_PRTY_MASK_0,     0xffffffff},
4672         {PXP2_REG_PXP2_PRTY_MASK_1,     0x7f},
4673         {HC_REG_HC_PRTY_MASK,           0x7},
4674         {MISC_REG_MISC_PRTY_MASK,       0x1},
4675         {QM_REG_QM_PRTY_MASK,           0x0},
4676         {DORQ_REG_DORQ_PRTY_MASK,       0x0},
4677         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4678         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4679         {SRC_REG_SRC_PRTY_MASK,         0x4}, /* bit 2 */
4680         {CDU_REG_CDU_PRTY_MASK,         0x0},
4681         {CFC_REG_CFC_PRTY_MASK,         0x0},
4682         {DBG_REG_DBG_PRTY_MASK,         0x0},
4683         {DMAE_REG_DMAE_PRTY_MASK,       0x0},
4684         {BRB1_REG_BRB1_PRTY_MASK,       0x0},
4685         {PRS_REG_PRS_PRTY_MASK,         (1<<6)}, /* bit 6 */
4686         {TSDM_REG_TSDM_PRTY_MASK,       0x18},  /* bits 3,4 */
4687         {CSDM_REG_CSDM_PRTY_MASK,       0x8},   /* bit 3 */
4688         {USDM_REG_USDM_PRTY_MASK,       0x38},  /* bits 3,4,5 */
4689         {XSDM_REG_XSDM_PRTY_MASK,       0x8},   /* bit 3 */
4690         {TSEM_REG_TSEM_PRTY_MASK_0,     0x0},
4691         {TSEM_REG_TSEM_PRTY_MASK_1,     0x0},
4692         {USEM_REG_USEM_PRTY_MASK_0,     0x0},
4693         {USEM_REG_USEM_PRTY_MASK_1,     0x0},
4694         {CSEM_REG_CSEM_PRTY_MASK_0,     0x0},
4695         {CSEM_REG_CSEM_PRTY_MASK_1,     0x0},
4696         {XSEM_REG_XSEM_PRTY_MASK_0,     0x0},
4697         {XSEM_REG_XSEM_PRTY_MASK_1,     0x0}
4698 };
4699
4700 static void enable_blocks_parity(struct bnx2x *bp)
4701 {
4702         int i;
4703
4704         for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4705                 REG_WR(bp, bnx2x_parity_mask[i].addr,
4706                         bnx2x_parity_mask[i].mask);
4707 }
4708
4709
4710 static void bnx2x_reset_common(struct bnx2x *bp)
4711 {
4712         /* reset_common */
4713         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4714                0xd3ffff7f);
4715         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4716 }
4717
4718 static void bnx2x_init_pxp(struct bnx2x *bp)
4719 {
4720         u16 devctl;
4721         int r_order, w_order;
4722
4723         pci_read_config_word(bp->pdev,
4724                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4725         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4726         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4727         if (bp->mrrs == -1)
4728                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4729         else {
4730                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4731                 r_order = bp->mrrs;
4732         }
4733
4734         bnx2x_init_pxp_arb(bp, r_order, w_order);
4735 }
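
/*
 * For reference (PCIe spec behaviour, not driver-specific): DEVCTL
 * bits 7:5 hold the Max_Payload_Size encoding and bits 14:12 the
 * Max_Read_Request_Size encoding, each meaning 128 << encoding bytes,
 * so the w_order/r_order values above are size exponents.
 */
#if 0
static unsigned int pcie_order_to_bytes(unsigned int order)
{
        return 128U << order;   /* 0 -> 128, 1 -> 256, ..., 5 -> 4096 */
}
#endif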
4736
4737 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4738 {
4739         int is_required;
4740         u32 val;
4741         int port;
4742
4743         if (BP_NOMCP(bp))
4744                 return;
4745
4746         is_required = 0;
4747         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4748               SHARED_HW_CFG_FAN_FAILURE_MASK;
4749
4750         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4751                 is_required = 1;
4752
4753         /*
4754          * The fan failure mechanism is usually related to the PHY type since
4755          * the power consumption of the board is affected by the PHY. Currently,
4756          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4757          */
4758         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4759                 for (port = PORT_0; port < PORT_MAX; port++) {
4760                         is_required |=
4761                                 bnx2x_fan_failure_det_req(
4762                                         bp,
4763                                         bp->common.shmem_base,
4764                                         bp->common.shmem2_base,
4765                                         port);
4766                 }
4767
4768         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4769
4770         if (is_required == 0)
4771                 return;
4772
4773         /* Fan failure is indicated by SPIO 5 */
4774         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4775                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
4776
4777         /* set to active low mode */
4778         val = REG_RD(bp, MISC_REG_SPIO_INT);
4779         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
4780                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
4781         REG_WR(bp, MISC_REG_SPIO_INT, val);
4782
4783         /* enable interrupt to signal the IGU */
4784         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4785         val |= (1 << MISC_REGISTERS_SPIO_5);
4786         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4787 }
4788
4789 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4790 {
4791         u32 offset = 0;
4792
4793         if (CHIP_IS_E1(bp))
4794                 return;
4795         if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4796                 return;
4797
4798         switch (BP_ABS_FUNC(bp)) {
4799         case 0:
4800                 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4801                 break;
4802         case 1:
4803                 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4804                 break;
4805         case 2:
4806                 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4807                 break;
4808         case 3:
4809                 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4810                 break;
4811         case 4:
4812                 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4813                 break;
4814         case 5:
4815                 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4816                 break;
4817         case 6:
4818                 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4819                 break;
4820         case 7:
4821                 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4822                 break;
4823         default:
4824                 return;
4825         }
4826
4827         REG_WR(bp, offset, pretend_func_num);
4828         REG_RD(bp, offset);
4829         DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4830 }
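
/*
 * Usage pattern, as in the callers further down: pretend to be another
 * function, touch its per-function registers, then always restore the
 * real function id.  SOME_PER_FUNC_REG is a placeholder, not a real
 * register name.
 */
#if 0
        bnx2x_pretend_func(bp, other_func);
        REG_WR(bp, SOME_PER_FUNC_REG, val);     /* lands in other_func's view */
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));        /* restore self */
#endif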
4831
4832 static void bnx2x_pf_disable(struct bnx2x *bp)
4833 {
4834         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4835         val &= ~IGU_PF_CONF_FUNC_EN;
4836
4837         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4838         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4839         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4840 }
4841
4842 static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
4843 {
4844         u32 val, i;
4845
4846         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
4847
4848         bnx2x_reset_common(bp);
4849         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4850         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4851
4852         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
4853         if (!CHIP_IS_E1(bp))
4854                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4855
4856         if (CHIP_IS_E2(bp)) {
4857                 u8 fid;
4858
4859                 /*
4860                  * In 4-port or 2-port mode we need to turn off master-enable
4861                  * for everyone; after that, turn it back on for self.
4862                  * So, regardless of multi-function mode, we always disable it
4863                  * for all functions on the given path, i.e. 0,2,4,6 for
4864                  * path 0 and 1,3,5,7 for path 1.
4865                  */
4866                 for (fid = BP_PATH(bp); fid  < E2_FUNC_MAX*2; fid += 2) {
4867                         if (fid == BP_ABS_FUNC(bp)) {
4868                                 REG_WR(bp,
4869                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4870                                     1);
4871                                 continue;
4872                         }
4873
4874                         bnx2x_pretend_func(bp, fid);
4875                         /* clear pf enable */
4876                         bnx2x_pf_disable(bp);
4877                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4878                 }
4879         }
4880
4881         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
4882         if (CHIP_IS_E1(bp)) {
4883                 /* enable HW interrupt from PXP on USDM overflow
4884                    bit 16 on INT_MASK_0 */
4885                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4886         }
4887
4888         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
4889         bnx2x_init_pxp(bp);
4890
4891 #ifdef __BIG_ENDIAN
4892         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4893         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4894         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4895         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4896         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4897         /* make sure this value is 0 */
4898         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
4899
4900 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4901         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4902         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4903         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4904         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
4905 #endif
4906
4907         bnx2x_ilt_init_page_size(bp, INITOP_SET);
4908
4909         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4910                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4911
4912         /* let the HW do its magic ... */
4913         msleep(100);
4914         /* finish PXP init */
4915         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4916         if (val != 1) {
4917                 BNX2X_ERR("PXP2 CFG failed\n");
4918                 return -EBUSY;
4919         }
4920         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4921         if (val != 1) {
4922                 BNX2X_ERR("PXP2 RD_INIT failed\n");
4923                 return -EBUSY;
4924         }
4925
4926         /* Timers bug workaround, E2 only: we need to set the entire ILT to
4927          * have entries with value "0" and the valid bit on.
4928          * This needs to be done by the first PF that is loaded in a path
4929          * (i.e. common phase).
4930          */
4931         if (CHIP_IS_E2(bp)) {
4932                 struct ilt_client_info ilt_cli;
4933                 struct bnx2x_ilt ilt;
4934                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4935                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4936
4937                 /* initialize dummy TM client */
4938                 ilt_cli.start = 0;
4939                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4940                 ilt_cli.client_num = ILT_CLIENT_TM;
4941
4942                 /* Step 1: set zeroes to all ilt page entries with valid bit on.
4943                  * Step 2: set the timers first/last ilt entry to point
4944                  * to the entire range to prevent ILT range error for 3rd/4th
4945                  * vnic (this code assumes existence of the vnic).
4946                  *
4947                  * Both steps are performed by the call to
4948                  * bnx2x_ilt_client_init_op() with the dummy TM client.
4949                  *
4950                  * We must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4951                  * and its sibling are split registers.
4952                  */
4953                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4954                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4955                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4956
4957                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4958                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4959                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4960         }
4961
4962
4963         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4964         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
4965
4966         if (CHIP_IS_E2(bp)) {
4967                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4968                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4969                 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4970
4971                 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4972
4973                 /* let the HW do its magic ... */
4974                 do {
4975                         msleep(200);
4976                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4977                 } while (factor-- && (val != 1));
4978
4979                 if (val != 1) {
4980                         BNX2X_ERR("ATC_INIT failed\n");
4981                         return -EBUSY;
4982                 }
4983         }
4984
4985         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
4986
4987         /* clean the DMAE memory */
4988         bp->dmae_ready = 1;
4989         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
4990
4991         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4992         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4993         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4994         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
4995
4996         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4997         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4998         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4999         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5000
5001         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5002
5003         if (CHIP_MODE_IS_4_PORT(bp))
5004                 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
5005
5006         /* QM queues pointers table */
5007         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
5008
5009         /* soft reset pulse */
5010         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5011         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5012
5013 #ifdef BCM_CNIC
5014         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5015 #endif
5016
5017         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5018         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5019
5020         if (!CHIP_REV_IS_SLOW(bp)) {
5021                 /* enable hw interrupt from doorbell Q */
5022                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5023         }
5024
5025         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5026         if (CHIP_MODE_IS_4_PORT(bp)) {
5027                 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5028                 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5029         }
5030
5031         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5032         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5033 #ifndef BCM_CNIC
5034         /* set NIC mode */
5035         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5036 #endif
5037         if (!CHIP_IS_E1(bp))
5038                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5039
5040         if (CHIP_IS_E2(bp)) {
5041                 /* Bit-map indicating which L2 hdrs may appear after the
5042                    basic Ethernet header */
5043                 int has_ovlan = IS_MF(bp);
5044                 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5045                 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5046         }
5047
5048         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5049         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5050         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5051         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5052
5053         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5054         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5055         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5056         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5057
5058         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5059         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5060         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5061         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5062
5063         if (CHIP_MODE_IS_4_PORT(bp))
5064                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5065
5066         /* sync semi rtc */
5067         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5068                0x80000000);
5069         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5070                0x80000000);
5071
5072         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5073         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5074         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5075
5076         if (CHIP_IS_E2(bp)) {
5077                 int has_ovlan = IS_MF(bp);
5078                 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5079                 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5080         }
5081
5082         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5083         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5084                 REG_WR(bp, i, random32());
5085
5086         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5087 #ifdef BCM_CNIC
5088         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5089         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5090         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5091         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5092         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5093         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5094         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5095         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5096         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5097         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5098 #endif
5099         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5100
5101         if (sizeof(union cdu_context) != 1024)
5102                 /* we currently assume that a context is 1024 bytes */
5103                 dev_alert(&bp->pdev->dev, "please adjust the size "
5104                                           "of cdu_context(%ld)\n",
5105                          (long)sizeof(union cdu_context));
5106
5107         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5108         val = (4 << 24) + (0 << 12) + 1024;
5109         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5110
5111         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5112         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5113         /* enable context validation interrupt from CFC */
5114         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5115
5116         /* set the thresholds to prevent CFC/CDU race */
5117         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5118
5119         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5120
5121         if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5122                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5123
5124         bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
5125         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5126
5127         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5128         /* Reset PCIE errors for debug */
5129         REG_WR(bp, 0x2814, 0xffffffff);
5130         REG_WR(bp, 0x3820, 0xffffffff);
5131
5132         if (CHIP_IS_E2(bp)) {
5133                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5134                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5135                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5136                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5137                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5138                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5139                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5140                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5141                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5142                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5143                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5144         }
5145
5146         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5147         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5148         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5149         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5150
5151         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5152         if (!CHIP_IS_E1(bp)) {
5153                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5154                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
5155         }
5156         if (CHIP_IS_E2(bp)) {
5157                 /* Bit-map indicating which L2 hdrs may appear after the
5158                    basic Ethernet header */
5159                 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5160         }
5161
5162         if (CHIP_REV_IS_SLOW(bp))
5163                 msleep(200);
5164
5165         /* finish CFC init */
5166         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5167         if (val != 1) {
5168                 BNX2X_ERR("CFC LL_INIT failed\n");
5169                 return -EBUSY;
5170         }
5171         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5172         if (val != 1) {
5173                 BNX2X_ERR("CFC AC_INIT failed\n");
5174                 return -EBUSY;
5175         }
5176         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5177         if (val != 1) {
5178                 BNX2X_ERR("CFC CAM_INIT failed\n");
5179                 return -EBUSY;
5180         }
5181         REG_WR(bp, CFC_REG_DEBUG0, 0);
5182
5183         if (CHIP_IS_E1(bp)) {
5184                 /* read NIG statistic
5185                    to see if this is our first time up since power-up */
5186                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5187                 val = *bnx2x_sp(bp, wb_data[0]);
5188
5189                 /* do internal memory self test */
5190                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5191                         BNX2X_ERR("internal mem self test failed\n");
5192                         return -EBUSY;
5193                 }
5194         }
5195
5196         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5197                                                        bp->common.shmem_base,
5198                                                        bp->common.shmem2_base);
5199
5200         bnx2x_setup_fan_failure_detection(bp);
5201
5202         /* clear PXP2 attentions */
5203         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5204
5205         enable_blocks_attention(bp);
5206         if (CHIP_PARITY_SUPPORTED(bp))
5207                 enable_blocks_parity(bp);
5208
5209         if (!BP_NOMCP(bp)) {
5210                 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5211                 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5212                     CHIP_IS_E1x(bp)) {
5213                         u32 shmem_base[2], shmem2_base[2];
5214                         shmem_base[0] =  bp->common.shmem_base;
5215                         shmem2_base[0] = bp->common.shmem2_base;
5216                         if (CHIP_IS_E2(bp)) {
5217                                 shmem_base[1] =
5218                                         SHMEM2_RD(bp, other_shmem_base_addr);
5219                                 shmem2_base[1] =
5220                                         SHMEM2_RD(bp, other_shmem2_base_addr);
5221                         }
5222                         bnx2x_acquire_phy_lock(bp);
5223                         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5224                                               bp->common.chip_id);
5225                         bnx2x_release_phy_lock(bp);
5226                 }
5227         } else
5228                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5229
5230         return 0;
5231 }
5232
5233 static int bnx2x_init_hw_port(struct bnx2x *bp)
5234 {
5235         int port = BP_PORT(bp);
5236         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5237         u32 low, high;
5238         u32 val;
5239
5240         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
5241
5242         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5243
5244         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5245         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5246
5247         /* Timers bug workaround: the pf_master bit in pglue is disabled at
5248          * the common phase; we need to enable it here before any DMAE access
5249          * is attempted. Therefore we manually add the enable-master to the
5250          * port phase (it also happens in the function phase)
5251          */
5252         if (CHIP_IS_E2(bp))
5253                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5254
5255         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5256         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5257         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5258         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5259
5260         /* QM cid (connection) count */
5261         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
5262
5263 #ifdef BCM_CNIC
5264         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5265         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5266         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
5267 #endif
5268
5269         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5270
5271         if (CHIP_MODE_IS_4_PORT(bp))
5272                 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
5273
5274         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5275                 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5276                 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5277                         /* no pause for emulation and FPGA */
5278                         low = 0;
5279                         high = 513;
5280                 } else {
5281                         if (IS_MF(bp))
5282                                 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5283                         else if (bp->dev->mtu > 4096) {
5284                                 if (bp->flags & ONE_PORT_FLAG)
5285                                         low = 160;
5286                                 else {
5287                                         val = bp->dev->mtu;
5288                                         /* (24*1024 + val*4)/256 */
5289                                         low = 96 + (val/64) +
5290                                                         ((val % 64) ? 1 : 0);
5291                                 }
5292                         } else
5293                                 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5294                         high = low + 56;        /* 14*1024/256 */
5295                 }
5296                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5297                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5298         }
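        /*
         * Worked example of the low-threshold formula above (a ceiling
         * of (24*1024 + mtu*4)/256, in 256-byte units), for a non-MF
         * two-port setup: with mtu = 9000, low = 96 + 9000/64 + 1 = 237
         * and high = 237 + 56 = 293.
         */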
5299
5300         if (CHIP_MODE_IS_4_PORT(bp)) {
5301                 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5302                 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5303                 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5304                                           BRB1_REG_MAC_GUARANTIED_0), 40);
5305         }
5306
5307         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5308
5309         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5310         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5311         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5312         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5313
5314         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5315         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5316         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5317         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5318         if (CHIP_MODE_IS_4_PORT(bp))
5319                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
5320
5321         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5322         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5323
5324         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5325
5326         if (!CHIP_IS_E2(bp)) {
5327                 /* configure PBF to work without PAUSE mtu 9000 */
5328                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5329
5330                 /* update threshold */
5331                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5332                 /* update init credit */
5333                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5334
5335                 /* probe changes */
5336                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5337                 udelay(50);
5338                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5339         }
5340
5341 #ifdef BCM_CNIC
5342         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
5343 #endif
5344         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5345         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5346
5347         if (CHIP_IS_E1(bp)) {
5348                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5349                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5350         }
5351         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5352
5353         bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5354
5355         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5356         /* init aeu_mask_attn_func_0/1:
5357          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5358          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5359          *             bits 4-7 are used for "per vn group attention" */
5360         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5361                (IS_MF(bp) ? 0xF7 : 0x7));
5362
5363         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5364         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5365         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5366         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5367         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5368
5369         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5370
5371         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5372
5373         if (!CHIP_IS_E1(bp)) {
5374                 /* 0x2 disable mf_ov, 0x1 enable */
5375                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5376                        (IS_MF(bp) ? 0x1 : 0x2));
5377
5378                 if (CHIP_IS_E2(bp)) {
5379                         val = 0;
5380                         switch (bp->mf_mode) {
5381                         case MULTI_FUNCTION_SD:
5382                                 val = 1;
5383                                 break;
5384                         case MULTI_FUNCTION_SI:
5385                                 val = 2;
5386                                 break;
5387                         }
5388
5389                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5390                                                   NIG_REG_LLH0_CLS_TYPE), val);
5391                 }
5392                 {
5393                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5394                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5395                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5396                 }
5397         }
5398
5399         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5400         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5401         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
5402                                                        bp->common.shmem_base,
5403                                                        bp->common.shmem2_base);
5404         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
5405                                       bp->common.shmem2_base, port)) {
5406                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5407                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5408                 val = REG_RD(bp, reg_addr);
5409                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5410                 REG_WR(bp, reg_addr, val);
5411         }
5412         bnx2x__link_reset(bp);
5413
5414         return 0;
5415 }
5416
5417 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5418 {
5419         int reg;
5420
5421         if (CHIP_IS_E1(bp))
5422                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5423         else
5424                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5425
5426         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5427 }
5428
5429 static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5430 {
5431         bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5432 }
5433
5434 static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5435 {
5436         u32 i, base = FUNC_ILT_BASE(func);
5437         for (i = base; i < base + ILT_PER_FUNC; i++)
5438                 bnx2x_ilt_wr(bp, i, 0);
5439 }
5440
5441 static int bnx2x_init_hw_func(struct bnx2x *bp)
5442 {
5443         int port = BP_PORT(bp);
5444         int func = BP_FUNC(bp);
5445         struct bnx2x_ilt *ilt = BP_ILT(bp);
5446         u16 cdu_ilt_start;
5447         u32 addr, val;
5448         int i;
5449
5450         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
5451
5452         /* set MSI reconfigure capability */
5453         if (bp->common.int_block == INT_BLOCK_HC) {
5454                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5455                 val = REG_RD(bp, addr);
5456                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5457                 REG_WR(bp, addr, val);
5458         }
5459
5460         ilt = BP_ILT(bp);
5461         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
5462
5463         for (i = 0; i < L2_ILT_LINES(bp); i++) {
5464                 ilt->lines[cdu_ilt_start + i].page =
5465                         bp->context.vcxt + (ILT_PAGE_CIDS * i);
5466                 ilt->lines[cdu_ilt_start + i].page_mapping =
5467                         bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5468                 /* cdu ilt pages are allocated manually so there's no need
5469                  * to set the size */
5470         }
5471         bnx2x_ilt_init_op(bp, INITOP_SET);
5472
5473 #ifdef BCM_CNIC
5474         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
5475
5476         /* T1 hash bits value determines the T1 number of entries */
5477         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
5478 #endif
5479
5480 #ifndef BCM_CNIC
5481         /* set NIC mode */
5482         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5483 #endif  /* BCM_CNIC */
5484
5485         if (CHIP_IS_E2(bp)) {
5486                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5487
5488                 /* Turn on a single ISR mode in IGU if driver is going to use
5489                  * INT#x or MSI
5490                  */
5491                 if (!(bp->flags & USING_MSIX_FLAG))
5492                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5493                 /*
5494                  * Timers bug workaround: function init part.
5495                  * We need to wait 20msec after initializing ILT to make
5496                  * sure there are no requests in one of the PXP internal
5497                  * queues with "old" ILT addresses.
5498                  */
5499                 msleep(20);
5500                 /*
5501                  * Master enable - due to WB DMAE writes performed before
5502                  * this point, this register is re-initialized here as part
5503                  * of the regular function init.
5504                  */
5505                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5506                 /* Enable the function in IGU */
5507                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5508         }
5509
5510         bp->dmae_ready = 1;
5511
5512         bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5513
5514         if (CHIP_IS_E2(bp))
5515                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5516
5517         bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5518         bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5519         bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5520         bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5521         bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5522         bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5523         bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5524         bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5525         bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5526
5527         if (CHIP_IS_E2(bp)) {
5528                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5529                                                                 BP_PATH(bp));
5530                 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5531                                                                 BP_PATH(bp));
5532         }
5533
5534         if (CHIP_MODE_IS_4_PORT(bp))
5535                 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5536
5537         if (CHIP_IS_E2(bp))
5538                 REG_WR(bp, QM_REG_PF_EN, 1);
5539
5540         bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
5541
5542         if (CHIP_MODE_IS_4_PORT(bp))
5543                 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5544
5545         bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5546         bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5547         bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5548         bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5549         bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5550         bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5551         bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5552         bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5553         bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5554         bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5555         bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5556         if (CHIP_IS_E2(bp))
5557                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5558
5559         bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5560
5561         bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5562
5563         if (CHIP_IS_E2(bp))
5564                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5565
5566         if (IS_MF(bp)) {
5567                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5568                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
5569         }
5570
5571         bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5572
5573         /* HC init per function */
5574         if (bp->common.int_block == INT_BLOCK_HC) {
5575                 if (CHIP_IS_E1H(bp)) {
5576                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5577
5578                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5579                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5580                 }
5581                 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5582
5583         } else {
5584                 int num_segs, sb_idx, prod_offset;
5585
5586                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5587
5588                 if (CHIP_IS_E2(bp)) {
5589                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5590                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5591                 }
5592
5593                 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5594
5595                 if (CHIP_IS_E2(bp)) {
5596                         int dsb_idx = 0;
5597                         /*
5598                          * Producer memory:
5599                          * E2 mode: address 0-135 match to the mapping memory;
5600                          * 136 - PF0 default prod; 137 - PF1 default prod;
5601                          * 138 - PF2 default prod; 139 - PF3 default prod;
5602                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
5603                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
5604                          * 144-147 reserved.
5605                          *
5606                          * E1.5 mode - in backward compatible mode;
5607                          * for non-default SBs, each even line in the memory
5608                          * holds the U producer and each odd line holds
5609                          * the C producer. The first 128 producers are for
5610                          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5611                          * producers are for the DSB for each PF.
5612                          * Each PF has five segments: (the order inside each
5613                          * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5614                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5615                          * 144-147 attn prods;
5616                          */
5617                         /* non-default-status-blocks */
5618                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5619                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5620                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5621                                 prod_offset = (bp->igu_base_sb + sb_idx) *
5622                                         num_segs;
5623
5624                                 for (i = 0; i < num_segs; i++) {
5625                                         addr = IGU_REG_PROD_CONS_MEMORY +
5626                                                         (prod_offset + i) * 4;
5627                                         REG_WR(bp, addr, 0);
5628                                 }
5629                                 /* send consumer update with value 0 */
5630                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5631                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5632                                 bnx2x_igu_clear_sb(bp,
5633                                                    bp->igu_base_sb + sb_idx);
5634                         }
5635
5636                         /* default-status-blocks */
5637                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5638                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5639
5640                         if (CHIP_MODE_IS_4_PORT(bp))
5641                                 dsb_idx = BP_FUNC(bp);
5642                         else
5643                                 dsb_idx = BP_E1HVN(bp);
5644
5645                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5646                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
5647                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
5648
5649                         for (i = 0; i < (num_segs * E1HVN_MAX);
5650                              i += E1HVN_MAX) {
5651                                 addr = IGU_REG_PROD_CONS_MEMORY +
5652                                                         (prod_offset + i)*4;
5653                                 REG_WR(bp, addr, 0);
5654                         }
5655                         /* send consumer update with 0 */
5656                         if (CHIP_INT_MODE_IS_BC(bp)) {
5657                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5658                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5659                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5660                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
5661                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5662                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
5663                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5664                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
5665                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5666                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5667                         } else {
5668                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5669                                              USTORM_ID, 0, IGU_INT_NOP, 1);
5670                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5671                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
5672                         }
5673                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5674
5675                         /* !!! these should become driver const once
5676                            rf-tool supports split-68 const */
5677                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5678                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5679                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5680                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5681                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5682                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5683                 }
5684         }
5685
5686         /* Reset PCIE errors for debug */
5687         REG_WR(bp, 0x2114, 0xffffffff);
5688         REG_WR(bp, 0x2120, 0xffffffff);
5689
5690         bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5691         bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5692         bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5693         bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5694         bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5695         bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5696
5697         bnx2x_phy_probe(&bp->link_params);
5698
5699         return 0;
5700 }
5701
5702 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5703 {
5704         int rc = 0;
5705
5706         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5707            BP_ABS_FUNC(bp), load_code);
5708
5709         bp->dmae_ready = 0;
5710         mutex_init(&bp->dmae_mutex);
5711         rc = bnx2x_gunzip_init(bp);
5712         if (rc)
5713                 return rc;
5714
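        /* The cases below deliberately fall through: a COMMON load code
         * runs common, port and function init, a PORT load code runs
         * port and function init, and a FUNCTION load code runs the
         * function init only.
         */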
5715         switch (load_code) {
5716         case FW_MSG_CODE_DRV_LOAD_COMMON:
5717         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5718                 rc = bnx2x_init_hw_common(bp, load_code);
5719                 if (rc)
5720                         goto init_hw_err;
5721                 /* no break */
5722
5723         case FW_MSG_CODE_DRV_LOAD_PORT:
5724                 rc = bnx2x_init_hw_port(bp);
5725                 if (rc)
5726                         goto init_hw_err;
5727                 /* no break */
5728
5729         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5730                 rc = bnx2x_init_hw_func(bp);
5731                 if (rc)
5732                         goto init_hw_err;
5733                 break;
5734
5735         default:
5736                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5737                 break;
5738         }
5739
5740         if (!BP_NOMCP(bp)) {
5741                 int mb_idx = BP_FW_MB_IDX(bp);
5742
5743                 bp->fw_drv_pulse_wr_seq =
5744                                 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
5745                                  DRV_PULSE_SEQ_MASK);
5746                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5747         }
5748
5749 init_hw_err:
5750         bnx2x_gunzip_end(bp);
5751
5752         return rc;
5753 }
5754
5755 void bnx2x_free_mem(struct bnx2x *bp)
5756 {
5757
5758 #define BNX2X_PCI_FREE(x, y, size) \
5759         do { \
5760                 if (x) { \
5761                         dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
5762                         x = NULL; \
5763                         y = 0; \
5764                 } \
5765         } while (0)
5766
5767 #define BNX2X_FREE(x) \
5768         do { \
5769                 if (x) { \
5770                         kfree((void *)x); \
5771                         x = NULL; \
5772                 } \
5773         } while (0)
5774
5775         int i;
5776
5777         /* fastpath */
5778         /* Common */
5779         for_each_queue(bp, i) {
5780                 /* status blocks */
5781                 if (CHIP_IS_E2(bp))
5782                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5783                                        bnx2x_fp(bp, i, status_blk_mapping),
5784                                        sizeof(struct host_hc_status_block_e2));
5785                 else
5786                         BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5787                                        bnx2x_fp(bp, i, status_blk_mapping),
5788                                        sizeof(struct host_hc_status_block_e1x));
5789         }
5790         /* Rx */
5791         for_each_queue(bp, i) {
5792
5793                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5794                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5795                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5796                                bnx2x_fp(bp, i, rx_desc_mapping),
5797                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5798
5799                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5800                                bnx2x_fp(bp, i, rx_comp_mapping),
5801                                sizeof(struct eth_fast_path_rx_cqe) *
5802                                NUM_RCQ_BD);
5803
5804                 /* SGE ring */
5805                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5806                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5807                                bnx2x_fp(bp, i, rx_sge_mapping),
5808                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5809         }
5810         /* Tx */
5811         for_each_queue(bp, i) {
5812
5813                 /* fastpath tx rings: tx_buf tx_desc */
5814                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5815                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5816                                bnx2x_fp(bp, i, tx_desc_mapping),
5817                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5818         }
5819         /* end of fastpath */
5820
5821         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5822                        sizeof(struct host_sp_status_block));
5823
5824         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5825                        sizeof(struct bnx2x_slowpath));
5826
5827         BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5828                        bp->context.size);
5829
5830         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5831
5832         BNX2X_FREE(bp->ilt->lines);
5833
5834 #ifdef BCM_CNIC
5835         if (CHIP_IS_E2(bp))
5836                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5837                                sizeof(struct host_hc_status_block_e2));
5838         else
5839                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5840                                sizeof(struct host_hc_status_block_e1x));
5841
5842         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
5843 #endif
5844
5845         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5846
5847         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5848                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
5849
5850 #undef BNX2X_PCI_FREE
5851 #undef BNX2X_FREE
5852 }
5853
5854 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5855 {
5856         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5857         if (CHIP_IS_E2(bp)) {
5858                 bnx2x_fp(bp, index, sb_index_values) =
5859                         (__le16 *)status_blk.e2_sb->sb.index_values;
5860                 bnx2x_fp(bp, index, sb_running_index) =
5861                         (__le16 *)status_blk.e2_sb->sb.running_index;
5862         } else {
5863                 bnx2x_fp(bp, index, sb_index_values) =
5864                         (__le16 *)status_blk.e1x_sb->sb.index_values;
5865                 bnx2x_fp(bp, index, sb_running_index) =
5866                         (__le16 *)status_blk.e1x_sb->sb.running_index;
5867         }
5868 }
5869
5870 int bnx2x_alloc_mem(struct bnx2x *bp)
5871 {
5872 #define BNX2X_PCI_ALLOC(x, y, size) \
5873         do { \
5874                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
5875                 if (x == NULL) \
5876                         goto alloc_mem_err; \
5877                 memset(x, 0, size); \
5878         } while (0)
5879
5880 #define BNX2X_ALLOC(x, size) \
5881         do { \
5882                 x = kzalloc(size, GFP_KERNEL); \
5883                 if (x == NULL) \
5884                         goto alloc_mem_err; \
5885         } while (0)
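/* BNX2X_PCI_ALLOC hands out DMA-coherent memory and zeroes it explicitly
 * (dma_alloc_coherent() alone is not guaranteed to return zeroed memory);
 * BNX2X_ALLOC is a plain kzalloc() wrapper. Both jump to alloc_mem_err on
 * failure, where bnx2x_free_mem() safely unwinds partial allocations since
 * its free macros skip NULL pointers.
 */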
5886
5887         int i;
5888
5889         /* fastpath */
5890         /* Common */
5891         for_each_queue(bp, i) {
5892                 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5893                 bnx2x_fp(bp, i, bp) = bp;
5894                 /* status blocks */
5895                 if (CHIP_IS_E2(bp))
5896                         BNX2X_PCI_ALLOC(sb->e2_sb,
5897                                 &bnx2x_fp(bp, i, status_blk_mapping),
5898                                 sizeof(struct host_hc_status_block_e2));
5899                 else
5900                         BNX2X_PCI_ALLOC(sb->e1x_sb,
5901                                 &bnx2x_fp(bp, i, status_blk_mapping),
5902                                 sizeof(struct host_hc_status_block_e1x));
5903
5904                 set_sb_shortcuts(bp, i);
5905         }
5906         /* Rx */
5907         for_each_queue(bp, i) {
5908
5909                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5910                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5911                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5912                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5913                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5914                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5915
5916                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5917                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5918                                 sizeof(struct eth_fast_path_rx_cqe) *
5919                                 NUM_RCQ_BD);
5920
5921                 /* SGE ring */
5922                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5923                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5924                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5925                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5926                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5927         }
5928         /* Tx */
5929         for_each_queue(bp, i) {
5930
5931                 /* fastpath tx rings: tx_buf tx_desc */
5932                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5933                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5934                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5935                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5936                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5937         }
5938         /* end of fastpath */
5939
5940 #ifdef BCM_CNIC
5941         if (CHIP_IS_E2(bp))
5942                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5943                                 sizeof(struct host_hc_status_block_e2));
5944         else
5945                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5946                                 sizeof(struct host_hc_status_block_e1x));
5947
5948         /* allocate searcher T2 table */
5949         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5950 #endif
5951
5952
5953         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5954                         sizeof(struct host_sp_status_block));
5955
5956         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5957                         sizeof(struct bnx2x_slowpath));
5958
5959         bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
5960
5961         BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
5962                         bp->context.size);
5963
5964         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
5965
5966         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
5967                 goto alloc_mem_err;
5968
5969         /* Slow path ring */
5970         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5971
5972         /* EQ */
5973         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
5974                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
5975         return 0;
5976
5977 alloc_mem_err:
5978         bnx2x_free_mem(bp);
5979         return -ENOMEM;
5980
5981 #undef BNX2X_PCI_ALLOC
5982 #undef BNX2X_ALLOC
5983 }
5984
5985 /*
5986  * Init service functions
5987  */
5988 int bnx2x_func_start(struct bnx2x *bp)
5989 {
5990         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
5991
5992         /* Wait for completion */
5993         return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5994                                  WAIT_RAMROD_COMMON);
5995 }
5996
5997 int bnx2x_func_stop(struct bnx2x *bp)
5998 {
5999         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6000
6001         /* Wait for completion */
6002         return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6003                                       0, &(bp->state), WAIT_RAMROD_COMMON);
6004 }
6005
6006 /**
6007  * Sets a MAC in a CAM for a few L2 Clients for E1x chips
6008  *
6009  * @param bp driver descriptor
6010  * @param set set or clear an entry (1 or 0)
6011  * @param mac pointer to a buffer containing a MAC
6012  * @param cl_bit_vec bit vector of clients to register a MAC for
6013  * @param cam_offset offset in a CAM to use
6014  * @param is_bcast whether the MAC being set is a broadcast address (E1 only)
6015  */
6016 static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6017                                    u32 cl_bit_vec, u8 cam_offset,
6018                                    u8 is_bcast)
6019 {
6020         struct mac_configuration_cmd *config =
6021                 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6022         int ramrod_flags = WAIT_RAMROD_COMMON;
6023
6024         bp->set_mac_pending = 1;
6025         smp_wmb();
6026
6027         config->hdr.length = 1;
6028         config->hdr.offset = cam_offset;
6029         config->hdr.client_id = 0xff;
6030         config->hdr.reserved1 = 0;
6031
6032         /* primary MAC */
6033         config->config_table[0].msb_mac_addr =
6034                                         swab16(*(u16 *)&mac[0]);
6035         config->config_table[0].middle_mac_addr =
6036                                         swab16(*(u16 *)&mac[2]);
6037         config->config_table[0].lsb_mac_addr =
6038                                         swab16(*(u16 *)&mac[4]);
6039         config->config_table[0].clients_bit_vector =
6040                                         cpu_to_le32(cl_bit_vec);
6041         config->config_table[0].vlan_id = 0;
6042         config->config_table[0].pf_id = BP_FUNC(bp);
6043         if (set)
6044                 SET_FLAG(config->config_table[0].flags,
6045                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6046                         T_ETH_MAC_COMMAND_SET);
6047         else
6048                 SET_FLAG(config->config_table[0].flags,
6049                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6050                         T_ETH_MAC_COMMAND_INVALIDATE);
6051
6052         if (is_bcast)
6053                 SET_FLAG(config->config_table[0].flags,
6054                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6055
6056         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
6057            (set ? "setting" : "clearing"),
6058            config->config_table[0].msb_mac_addr,
6059            config->config_table[0].middle_mac_addr,
6060            config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6061
6062         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6063                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6064                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6065
6066         /* Wait for a completion */
6067         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6068 }
6069
6070 int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6071                       int *state_p, int flags)
6072 {
6073         /* can take a while if any port is running */
6074         int cnt = 5000;
6075         u8 poll = flags & WAIT_RAMROD_POLL;
6076         u8 common = flags & WAIT_RAMROD_COMMON;
6077
6078         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6079            poll ? "polling" : "waiting", state, idx);
6080
6081         might_sleep();
6082         while (cnt--) {
6083                 if (poll) {
6084                         if (common)
6085                                 bnx2x_eq_int(bp);
6086                         else {
6087                                 bnx2x_rx_int(bp->fp, 10);
6088                                 /* if index is different from 0
6089                                  * the reply for some commands will
6090                                  * be on the non-default queue
6091                                  */
6092                                 if (idx)
6093                                         bnx2x_rx_int(&bp->fp[idx], 10);
6094                         }
6095                 }
6096
6097                 mb(); /* state is changed by bnx2x_sp_event() */
6098                 if (*state_p == state) {
6099 #ifdef BNX2X_STOP_ON_ERROR
6100                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6101 #endif
6102                         return 0;
6103                 }
6104
6105                 msleep(1);
6106
6107                 if (bp->panic)
6108                         return -EIO;
6109         }
6110
6111         /* timeout! */
6112         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6113                   poll ? "polling" : "waiting", state, idx);
6114 #ifdef BNX2X_STOP_ON_ERROR
6115         bnx2x_panic();
6116 #endif
6117
6118         return -EBUSY;
6119 }
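
/* The wait loop above is typically paired with a slow-path post, e.g. the
 * MAC configuration pattern used earlier in this file:
 *
 *	bp->set_mac_pending = 1;
 *	smp_wmb();
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, ...);
 *	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
 *			  WAIT_RAMROD_COMMON);
 *
 * With cnt = 5000 and msleep(1) per iteration, the loop gives a ramrod
 * roughly five seconds to complete before giving up with -EBUSY.
 */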
6120
6121 u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6122 {
6123         if (CHIP_IS_E1H(bp))
6124                 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6125         else if (CHIP_MODE_IS_4_PORT(bp))
6126                 return BP_FUNC(bp) * 32  + rel_offset;
6127         else
6128                 return BP_VN(bp) * 32  + rel_offset;
6129 }
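
/* Resulting CAM layout, as a sketch (assuming E1H_FUNC_MAX is 8): on E1H
 * the entries for a given relative line are interleaved per function, so
 * rel_offset 0 covers CAM entries 0-7 (one per function), rel_offset 1
 * covers 8-15, and so on; otherwise each function (or vn) owns its own
 * contiguous window of 32 entries.
 */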
6130
6131 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6132 {
6133         u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6134                          bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6135
6136         /* networking  MAC */
6137         bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6138                                (1 << bp->fp->cl_id), cam_offset, 0);
6139
6140         if (CHIP_IS_E1(bp)) {
6141                 /* broadcast MAC */
6142                 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6143                 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6144         }
6145 }
6146 static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6147 {
6148         int i = 0, old;
6149         struct net_device *dev = bp->dev;
6150         struct netdev_hw_addr *ha;
6151         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6152         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6153
6154         netdev_for_each_mc_addr(ha, dev) {
6155                 /* copy mac */
6156                 config_cmd->config_table[i].msb_mac_addr =
6157                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6158                 config_cmd->config_table[i].middle_mac_addr =
6159                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6160                 config_cmd->config_table[i].lsb_mac_addr =
6161                         swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6162
6163                 config_cmd->config_table[i].vlan_id = 0;
6164                 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6165                 config_cmd->config_table[i].clients_bit_vector =
6166                         cpu_to_le32(1 << BP_L_ID(bp));
6167
6168                 SET_FLAG(config_cmd->config_table[i].flags,
6169                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6170                         T_ETH_MAC_COMMAND_SET);
6171
6172                 DP(NETIF_MSG_IFUP,
6173                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6174                    config_cmd->config_table[i].msb_mac_addr,
6175                    config_cmd->config_table[i].middle_mac_addr,
6176                    config_cmd->config_table[i].lsb_mac_addr);
6177                 i++;
6178         }
6179         old = config_cmd->hdr.length;
6180         if (old > i) {
6181                 for (; i < old; i++) {
6182                         if (CAM_IS_INVALID(config_cmd->
6183                                            config_table[i])) {
6184                                 /* already invalidated */
6185                                 break;
6186                         }
6187                         /* invalidate */
6188                         SET_FLAG(config_cmd->config_table[i].flags,
6189                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6190                                 T_ETH_MAC_COMMAND_INVALIDATE);
6191                 }
6192         }
6193
6194         config_cmd->hdr.length = i;
6195         config_cmd->hdr.offset = offset;
6196         config_cmd->hdr.client_id = 0xff;
6197         config_cmd->hdr.reserved1 = 0;
6198
6199         bp->set_mac_pending = 1;
6200         smp_wmb();
6201
6202         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6203                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6204 }
6205 static void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6206 {
6207         int i;
6208         struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6209         dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6210         int ramrod_flags = WAIT_RAMROD_COMMON;
6211
6212         bp->set_mac_pending = 1;
6213         smp_wmb();
6214
6215         for (i = 0; i < config_cmd->hdr.length; i++)
6216                 SET_FLAG(config_cmd->config_table[i].flags,
6217                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6218                         T_ETH_MAC_COMMAND_INVALIDATE);
6219
6220         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6221                       U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6222
6223         /* Wait for a completion */
6224         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6225                                 ramrod_flags);
6226
6227 }
6228
6229 #ifdef BCM_CNIC
6230 /**
6231  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6232  * MAC(s). This function will wait until the ramrod completion
6233  * returns.
6234  *
6235  * @param bp driver handle
6236  * @param set set or clear the CAM entry
6237  *
6238  * @return 0 on success, -ENODEV if the ramrod doesn't return.
6239  */
6240 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6241 {
6242         u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6243                          bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6244         u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6245         u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6246
6247         /* Send a SET_MAC ramrod */
6248         bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6249                                cam_offset, 0);
6250         return 0;
6251 }
6252 #endif
6253
6254 static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6255                                     struct bnx2x_client_init_params *params,
6256                                     u8 activate,
6257                                     struct client_init_ramrod_data *data)
6258 {
6259         /* Clear the buffer */
6260         memset(data, 0, sizeof(*data));
6261
6262         /* general */
6263         data->general.client_id = params->rxq_params.cl_id;
6264         data->general.statistics_counter_id = params->rxq_params.stat_id;
6265         data->general.statistics_en_flg =
6266                 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6267         data->general.activate_flg = activate;
6268         data->general.sp_client_id = params->rxq_params.spcl_id;
6269
6270         /* Rx data */
6271         data->rx.tpa_en_flg =
6272                 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6273         data->rx.vmqueue_mode_en_flg = 0;
6274         data->rx.cache_line_alignment_log_size =
6275                 params->rxq_params.cache_line_log;
6276         data->rx.enable_dynamic_hc =
6277                 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6278         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6279         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6280         data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6281
6282         /* We don't set drop flags */
6283         data->rx.drop_ip_cs_err_flg = 0;
6284         data->rx.drop_tcp_cs_err_flg = 0;
6285         data->rx.drop_ttl0_flg = 0;
6286         data->rx.drop_udp_cs_err_flg = 0;
6287
6288         data->rx.inner_vlan_removal_enable_flg =
6289                 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6290         data->rx.outer_vlan_removal_enable_flg =
6291                 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6292         data->rx.status_block_id = params->rxq_params.fw_sb_id;
6293         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6294         data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6295         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6296         data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6297         data->rx.bd_page_base.lo =
6298                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6299         data->rx.bd_page_base.hi =
6300                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6301         data->rx.sge_page_base.lo =
6302                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6303         data->rx.sge_page_base.hi =
6304                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6305         data->rx.cqe_page_base.lo =
6306                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6307         data->rx.cqe_page_base.hi =
6308                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6309         data->rx.is_leading_rss =
6310                 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6311         data->rx.is_approx_mcast = data->rx.is_leading_rss;
6312
6313         /* Tx data */
6314         data->tx.enforce_security_flg = 0; /* VF specific */
6315         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6316         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6317         data->tx.mtu = 0; /* VF specific */
6318         data->tx.tx_bd_page_base.lo =
6319                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6320         data->tx.tx_bd_page_base.hi =
6321                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6322
6323         /* flow control data */
6324         data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6325         data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6326         data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6327         data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6328         data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6329         data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6330         data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6331
6332         data->fc.safc_group_num = params->txq_params.cos;
6333         data->fc.safc_group_en_flg =
6334                 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6335         data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6336 }
6337
6338 static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6339 {
6340         /* ustorm cxt validation */
6341         cxt->ustorm_ag_context.cdu_usage =
6342                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6343                                        ETH_CONNECTION_TYPE);
6344         /* xcontext validation */
6345         cxt->xstorm_ag_context.cdu_reserved =
6346                 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6347                                        ETH_CONNECTION_TYPE);
6348 }
6349
6350 int bnx2x_setup_fw_client(struct bnx2x *bp,
6351                           struct bnx2x_client_init_params *params,
6352                           u8 activate,
6353                           struct client_init_ramrod_data *data,
6354                           dma_addr_t data_mapping)
6355 {
6356         u16 hc_usec;
6357         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6358         int ramrod_flags = 0, rc;
6359
6360         /* HC and context validation values */
6361         hc_usec = params->txq_params.hc_rate ?
6362                 1000000 / params->txq_params.hc_rate : 0;
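        /* hc_rate is presumably a rate in interrupts/sec, so the value
         * passed down is the period in usec: e.g. hc_rate = 5000 gives a
         * 200us coalescing interval (hc_rate = 0 simply yields 0).
         */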
6363         bnx2x_update_coalesce_sb_index(bp,
6364                         params->txq_params.fw_sb_id,
6365                         params->txq_params.sb_cq_index,
6366                         !(params->txq_params.flags & QUEUE_FLG_HC),
6367                         hc_usec);
6368
6369         *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6370
6371         hc_usec = params->rxq_params.hc_rate ?
6372                 1000000 / params->rxq_params.hc_rate : 0;
6373         bnx2x_update_coalesce_sb_index(bp,
6374                         params->rxq_params.fw_sb_id,
6375                         params->rxq_params.sb_cq_index,
6376                         !(params->rxq_params.flags & QUEUE_FLG_HC),
6377                         hc_usec);
6378
6379         bnx2x_set_ctx_validation(params->rxq_params.cxt,
6380                                  params->rxq_params.cid);
6381
6382         /* zero stats */
6383         if (params->txq_params.flags & QUEUE_FLG_STATS)
6384                 storm_memset_xstats_zero(bp, BP_PORT(bp),
6385                                          params->txq_params.stat_id);
6386
6387         if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6388                 storm_memset_ustats_zero(bp, BP_PORT(bp),
6389                                          params->rxq_params.stat_id);
6390                 storm_memset_tstats_zero(bp, BP_PORT(bp),
6391                                          params->rxq_params.stat_id);
6392         }
6393
6394         /* Fill the ramrod data */
6395         bnx2x_fill_cl_init_data(bp, params, activate, data);
6396
6397         /* SETUP ramrod.
6398          *
6399          * bnx2x_sp_post() takes a spin_lock, thus no other explicit memory
6400          * barrier except mmiowb() is needed to impose a
6401          * proper ordering of memory operations.
6402          */
6403         mmiowb();
6404
6405
6406         bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6407                       U64_HI(data_mapping), U64_LO(data_mapping), 0);
6408
6409         /* Wait for completion */
6410         rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6411                                  params->ramrod_params.index,
6412                                  params->ramrod_params.pstate,
6413                                  ramrod_flags);
6414         return rc;
6415 }
6416
6417 /**
6418  * Configure interrupt mode according to current configuration.
6419  * In case of MSI-X it will also try to enable MSI-X.
6420  *
6421  * @param bp
6422  *
6423  * @return int
6424  */
6425 static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6426 {
6427         int rc = 0;
6428
6429         switch (bp->int_mode) {
6430         case INT_MODE_MSI:
6431                 bnx2x_enable_msi(bp);
6432                 /* falling through... */
6433         case INT_MODE_INTx:
6434                 bp->num_queues = 1;
6435                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6436                 break;
6437         default:
6438                 /* Set number of queues according to bp->multi_mode value */
6439                 bnx2x_set_num_queues(bp);
6440
6441                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6442                    bp->num_queues);
6443
6444                 /* if we can't use MSI-X we only need one fp,
6445                  * so try to enable MSI-X with the requested number of fp's
6446                  * and fall back to MSI or legacy INTx with one fp
6447                  */
6448                 rc = bnx2x_enable_msix(bp);
6449                 if (rc) {
6450                         /* failed to enable MSI-X */
6451                         if (bp->multi_mode)
6452                                 DP(NETIF_MSG_IFUP,
6453                                           "Multi requested but failed to "
6454                                           "enable MSI-X (requested %d queues), "
6455                                           "setting number of queues to %d\n",
6456                                    bp->num_queues,
6457                                    1);
6458                         bp->num_queues = 1;
6459
6460                         if (!(bp->flags & DISABLE_MSI_FLAG))
6461                                 bnx2x_enable_msi(bp);
6462                 }
6463
6464                 break;
6465         }
6466
6467         return rc;
6468 }
6469
6470 /* must be called prior to any HW initialization */
6471 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6472 {
6473         return L2_ILT_LINES(bp);
6474 }
6475
6476 void bnx2x_ilt_set_info(struct bnx2x *bp)
6477 {
6478         struct ilt_client_info *ilt_client;
6479         struct bnx2x_ilt *ilt = BP_ILT(bp);
6480         u16 line = 0;
6481
6482         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6483         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6484
6485         /* CDU */
6486         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6487         ilt_client->client_num = ILT_CLIENT_CDU;
6488         ilt_client->page_size = CDU_ILT_PAGE_SZ;
6489         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6490         ilt_client->start = line;
6491         line += L2_ILT_LINES(bp);
6492 #ifdef BCM_CNIC
6493         line += CNIC_ILT_LINES;
6494 #endif
6495         ilt_client->end = line - 1;
6496
6497         DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6498                                          "flags 0x%x, hw psz %d\n",
6499            ilt_client->start,
6500            ilt_client->end,
6501            ilt_client->page_size,
6502            ilt_client->flags,
6503            ilog2(ilt_client->page_size >> 12));
6504
6505         /* QM */
6506         if (QM_INIT(bp->qm_cid_count)) {
6507                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6508                 ilt_client->client_num = ILT_CLIENT_QM;
6509                 ilt_client->page_size = QM_ILT_PAGE_SZ;
6510                 ilt_client->flags = 0;
6511                 ilt_client->start = line;
6512
6513                 /* 4 bytes for each cid */
6514                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6515                                                          QM_ILT_PAGE_SZ);
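                /* E.g. (made-up sizes, just to illustrate the rounding)
                 * 1024 cids with 8 queues per function need
                 * 1024 * 8 * 4 = 32K of QM context, i.e.
                 * DIV_ROUND_UP(32768, QM_ILT_PAGE_SZ) lines - 8 lines
                 * for a 4K ILT page.
                 */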
6516
6517                 ilt_client->end = line - 1;
6518
6519                 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6520                                                  "flags 0x%x, hw psz %d\n",
6521                    ilt_client->start,
6522                    ilt_client->end,
6523                    ilt_client->page_size,
6524                    ilt_client->flags,
6525                    ilog2(ilt_client->page_size >> 12));
6526
6527         }
6528         /* SRC */
6529         ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6530 #ifdef BCM_CNIC
6531         ilt_client->client_num = ILT_CLIENT_SRC;
6532         ilt_client->page_size = SRC_ILT_PAGE_SZ;
6533         ilt_client->flags = 0;
6534         ilt_client->start = line;
6535         line += SRC_ILT_LINES;
6536         ilt_client->end = line - 1;
6537
6538         DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6539                                          "flags 0x%x, hw psz %d\n",
6540            ilt_client->start,
6541            ilt_client->end,
6542            ilt_client->page_size,
6543            ilt_client->flags,
6544            ilog2(ilt_client->page_size >> 12));
6545
6546 #else
6547         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6548 #endif
6549
6550         /* TM */
6551         ilt_client = &ilt->clients[ILT_CLIENT_TM];
6552 #ifdef BCM_CNIC
6553         ilt_client->client_num = ILT_CLIENT_TM;
6554         ilt_client->page_size = TM_ILT_PAGE_SZ;
6555         ilt_client->flags = 0;
6556         ilt_client->start = line;
6557         line += TM_ILT_LINES;
6558         ilt_client->end = line - 1;
6559
6560         DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6561                                          "flags 0x%x, hw psz %d\n",
6562            ilt_client->start,
6563            ilt_client->end,
6564            ilt_client->page_size,
6565            ilt_client->flags,
6566            ilog2(ilt_client->page_size >> 12));
6567
6568 #else
6569         ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6570 #endif
6571 }
6572
6573 int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6574                        int is_leading)
6575 {
6576         struct bnx2x_client_init_params params = { {0} };
6577         int rc;
6578
6579         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6580                              IGU_INT_ENABLE, 0);
6581
6582         params.ramrod_params.pstate = &fp->state;
6583         params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6584         params.ramrod_params.index = fp->index;
6585         params.ramrod_params.cid = fp->cid;
6586
6587         if (is_leading)
6588                 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6589
6590         bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6591
6592         bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6593
6594         rc = bnx2x_setup_fw_client(bp, &params, 1,
6595                                      bnx2x_sp(bp, client_init_data),
6596                                      bnx2x_sp_mapping(bp, client_init_data));
6597         return rc;
6598 }
6599
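/* Stopping a FW client is a three-ramrod sequence, each completion awaited
 * in turn: HALT the connection, TERMINATE it, then delete its CFC entry,
 * driving the fastpath state HALTING -> HALTED -> TERMINATING ->
 * TERMINATED -> CLOSED.
 */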
6600 int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6601 {
6602         int rc;
6603
6604         int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6605
6606         /* halt the connection */
6607         *p->pstate = BNX2X_FP_STATE_HALTING;
6608         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6609                                                   p->cl_id, 0);
6610
6611         /* Wait for completion */
6612         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6613                                p->pstate, poll_flag);
6614         if (rc) /* timeout */
6615                 return rc;
6616
6617         *p->pstate = BNX2X_FP_STATE_TERMINATING;
6618         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6619                                                        p->cl_id, 0);
6620         /* Wait for completion */
6621         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6622                                p->pstate, poll_flag);
6623         if (rc) /* timeout */
6624                 return rc;
6625
6626
6627         /* delete cfc entry */
6628         bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
6629
6630         /* Wait for completion */
6631         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6632                                p->pstate, WAIT_RAMROD_COMMON);
6633         return rc;
6634 }
6635
6636 static int bnx2x_stop_client(struct bnx2x *bp, int index)
6637 {
6638         struct bnx2x_client_ramrod_params client_stop = {0};
6639         struct bnx2x_fastpath *fp = &bp->fp[index];
6640
6641         client_stop.index = index;
6642         client_stop.cid = fp->cid;
6643         client_stop.cl_id = fp->cl_id;
6644         client_stop.pstate = &(fp->state);
6645         client_stop.poll = 0;
6646
6647         return bnx2x_stop_fw_client(bp, &client_stop);
6648 }
6649
6650
6651 static void bnx2x_reset_func(struct bnx2x *bp)
6652 {
6653         int port = BP_PORT(bp);
6654         int func = BP_FUNC(bp);
6655         int i;
6656         int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6657                         (CHIP_IS_E2(bp) ?
6658                          offsetof(struct hc_status_block_data_e2, common) :
6659                          offsetof(struct hc_status_block_data_e1x, common));
6660         int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6661         int pfid_offset = offsetof(struct pci_entity, pf_id);
6662
6663         /* Disable the function in the FW */
6664         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6665         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6666         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6667         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6668
6669         /* FP SBs */
6670         for_each_queue(bp, i) {
6671                 struct bnx2x_fastpath *fp = &bp->fp[i];
6672                 REG_WR8(bp,
6673                         BAR_CSTRORM_INTMEM +
6674                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6675                         + pfunc_offset_fp + pfid_offset,
6676                         HC_FUNCTION_DISABLED);
6677         }
6678
6679         /* SP SB */
6680         REG_WR8(bp,
6681                 BAR_CSTRORM_INTMEM +
6682                 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6683                 pfunc_offset_sp + pfid_offset,
6684                 HC_FUNCTION_DISABLED);
6685
6686
6687         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6688                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6689                        0);
6690
6691         /* Configure IGU */
6692         if (bp->common.int_block == INT_BLOCK_HC) {
6693                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6694                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6695         } else {
6696                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6697                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6698         }
6699
6700 #ifdef BCM_CNIC
6701         /* Disable Timer scan */
6702         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6703         /*
6704          * Wait for at least 10ms and up to 2 seconds for the timers scan to
6705          * complete
6706          */
6707         for (i = 0; i < 200; i++) {
6708                 msleep(10);
6709                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6710                         break;
6711         }
6712 #endif
6713         /* Clear ILT */
6714         bnx2x_clear_func_ilt(bp, func);
6715
6716         /* Timers workaround for an E2 bug: if this is vnic-3,
6717          * we need to set the entire ILT range for the timers.
6718          */
6719         if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6720                 struct ilt_client_info ilt_cli;
6721                 /* use dummy TM client */
6722                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6723                 ilt_cli.start = 0;
6724                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6725                 ilt_cli.client_num = ILT_CLIENT_TM;
6726
6727                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6728         }
6729
6730         /* this assumes that reset_port() was called before reset_func() */
6731         if (CHIP_IS_E2(bp))
6732                 bnx2x_pf_disable(bp);
6733
6734         bp->dmae_ready = 0;
6735 }
6736
6737 static void bnx2x_reset_port(struct bnx2x *bp)
6738 {
6739         int port = BP_PORT(bp);
6740         u32 val;
6741
6742         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6743
6744         /* Do not rcv packets to BRB */
6745         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6746         /* Do not direct rcv packets that are not for MCP to the BRB */
6747         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6748                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6749
6750         /* Configure AEU */
6751         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6752
6753         msleep(100);
6754         /* Check for BRB port occupancy */
6755         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6756         if (val)
6757                 DP(NETIF_MSG_IFDOWN,
6758                    "BRB1 is not empty  %d blocks are occupied\n", val);
6759
6760         /* TODO: Close Doorbell port? */
6761 }
6762
6763 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6764 {
6765         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6766            BP_ABS_FUNC(bp), reset_code);
6767
6768         switch (reset_code) {
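        /* Mirrors the load cascade in reverse scope: COMMON resets port,
         * function and then the common blocks, PORT resets port and
         * function, FUNCTION resets only the function itself.
         */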
6769         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6770                 bnx2x_reset_port(bp);
6771                 bnx2x_reset_func(bp);
6772                 bnx2x_reset_common(bp);
6773                 break;
6774
6775         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6776                 bnx2x_reset_port(bp);
6777                 bnx2x_reset_func(bp);
6778                 break;
6779
6780         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6781                 bnx2x_reset_func(bp);
6782                 break;
6783
6784         default:
6785                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6786                 break;
6787         }
6788 }
6789
6790 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6791 {
6792         int port = BP_PORT(bp);
6793         u32 reset_code = 0;
6794         int i, cnt, rc;
6795
6796         /* Wait until tx fastpath tasks complete */
6797         for_each_queue(bp, i) {
6798                 struct bnx2x_fastpath *fp = &bp->fp[i];
6799
6800                 cnt = 1000;
6801                 while (bnx2x_has_tx_work_unload(fp)) {
6802
6803                         if (!cnt) {
6804                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6805                                           i);
6806 #ifdef BNX2X_STOP_ON_ERROR
6807                                 bnx2x_panic();
6808                                 return;
6809 #else
6810                                 break;
6811 #endif
6812                         }
6813                         cnt--;
6814                         msleep(1);
6815                 }
6816         }
6817         /* Give HW time to discard old tx messages */
6818         msleep(1);
6819
6820         if (CHIP_IS_E1(bp)) {
6821                 /* invalidate mc list,
6822                  * wait and poll (interrupts are off)
6823                  */
6824                 bnx2x_invalidate_e1_mc_list(bp);
6825                 bnx2x_set_eth_mac(bp, 0);
6826
6827         } else {
6828                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6829
6830                 bnx2x_set_eth_mac(bp, 0);
6831
6832                 for (i = 0; i < MC_HASH_SIZE; i++)
6833                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6834         }
6835
6836 #ifdef BCM_CNIC
6837         /* Clear iSCSI L2 MAC */
6838         mutex_lock(&bp->cnic_mutex);
6839         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6840                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6841                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6842         }
6843         mutex_unlock(&bp->cnic_mutex);
6844 #endif
6845
6846         if (unload_mode == UNLOAD_NORMAL)
6847                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6848
6849         else if (bp->flags & NO_WOL_FLAG)
6850                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6851
6852         else if (bp->wol) {
6853                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6854                 u8 *mac_addr = bp->dev->dev_addr;
6855                 u32 val;
6856                 /* The MAC address is written to entries 1-4 to
6857                    preserve entry 0, which is used by the PMF */
6858                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6859
6860                 val = (mac_addr[0] << 8) | mac_addr[1];
6861                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6862
6863                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6864                       (mac_addr[4] << 8) | mac_addr[5];
6865                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6866
6867                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6868
6869         } else
6870                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6871
6872         /* Close multi and leading connections.
6873            Completions for ramrods are collected in a synchronous way */
6874         for_each_queue(bp, i)
6875
6876                 if (bnx2x_stop_client(bp, i))
6877 #ifdef BNX2X_STOP_ON_ERROR
6878                         return;
6879 #else
6880                         goto unload_error;
6881 #endif
6882
6883         rc = bnx2x_func_stop(bp);
6884         if (rc) {
6885                 BNX2X_ERR("Function stop failed!\n");
6886 #ifdef BNX2X_STOP_ON_ERROR
6887                 return;
6888 #else
6889                 goto unload_error;
6890 #endif
6891         }
6892 #ifndef BNX2X_STOP_ON_ERROR
6893 unload_error:
6894 #endif
6895         if (!BP_NOMCP(bp))
6896                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
6897         else {
6898                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
6899                                      "%d, %d, %d\n", BP_PATH(bp),
6900                    load_count[BP_PATH(bp)][0],
6901                    load_count[BP_PATH(bp)][1],
6902                    load_count[BP_PATH(bp)][2]);
6903                 load_count[BP_PATH(bp)][0]--;
6904                 load_count[BP_PATH(bp)][1 + port]--;
6905                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
6906                                      "%d, %d, %d\n", BP_PATH(bp),
6907                    load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6908                    load_count[BP_PATH(bp)][2]);
6909                 if (load_count[BP_PATH(bp)][0] == 0)
6910                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6911                 else if (load_count[BP_PATH(bp)][1 + port] == 0)
6912                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6913                 else
6914                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6915         }
6916
6917         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6918             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6919                 bnx2x__link_reset(bp);
6920
6921         /* Disable HW interrupts, NAPI */
6922         bnx2x_netif_stop(bp, 1);
6923
6924         /* Release IRQs */
6925         bnx2x_free_irq(bp);
6926
6927         /* Reset the chip */
6928         bnx2x_reset_chip(bp, reset_code);
6929
6930         /* Report UNLOAD_DONE to MCP */
6931         if (!BP_NOMCP(bp))
6932                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
6933
6934 }
6935
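/**
 * Disables the "close the gates" mechanism: on E1 clears the relevant
 * AEU mask bits of this port, on E1H clears the PXP and NIG close
 * masks in the AEU general mask register.
 *
 * @param bp Driver handle.
 */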
6936 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6937 {
6938         u32 val;
6939
6940         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6941
6942         if (CHIP_IS_E1(bp)) {
6943                 int port = BP_PORT(bp);
6944                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6945                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
6946
6947                 val = REG_RD(bp, addr);
6948                 val &= ~(0x300);
6949                 REG_WR(bp, addr, val);
6950         } else if (CHIP_IS_E1H(bp)) {
6951                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6952                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6953                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6954                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6955         }
6956 }
6957
6958 /* Close gates #2, #3 and #4: */
6959 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6960 {
6961         u32 val, addr;
6962
6963         /* Gates #2 and #4a are closed/opened for "not E1" only */
6964         if (!CHIP_IS_E1(bp)) {
6965                 /* #4 */
6966                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6967                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6968                        close ? (val | 0x1) : (val & (~(u32)1)));
6969                 /* #2 */
6970                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6971                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6972                        close ? (val | 0x1) : (val & (~(u32)1)));
6973         }
6974
6975         /* #3 */
6976         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6977         val = REG_RD(bp, addr);
6978         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6979
6980         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6981                 close ? "closing" : "opening");
6982         mmiowb();
6983 }
6984
6985 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
6986
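/**
 * Prepares the CLP for an MCP reset: saves the current value of the
 * `magic' bit from the CLP mailbox and then sets it, so that the MF
 * configuration is preserved across the reset.
 *
 * @param bp Driver handle.
 * @param magic_val Buffer for the old value of the `magic' bit.
 */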
6987 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6988 {
6989         /* Save the current `magic' bit value and set the bit */
6990         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6991         *magic_val = val & SHARED_MF_CLP_MAGIC;
6992         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6993 }
6994
6995 /* Restore the value of the `magic' bit.
6996  *
6997  * @param bp Driver handle.
6998  * @param magic_val Old value of the `magic' bit.
6999  */
7000 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7001 {
7002         /* Restore the `magic' bit value... */
7003         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7004         MF_CFG_WR(bp, shared_mf_config.clp_mb,
7005                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7006 }
7007
7008 /**
7009  * Prepares for MCP reset: takes care of CLP configurations.
7010  *
7011  * @param bp Driver handle.
7012  * @param magic_val Old value of the `magic' bit.
7013  */
7014 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7015 {
7016         u32 shmem;
7017         u32 validity_offset;
7018
7019         DP(NETIF_MSG_HW, "Starting\n");
7020
7021         /* Set `magic' bit in order to save MF config */
7022         if (!CHIP_IS_E1(bp))
7023                 bnx2x_clp_reset_prep(bp, magic_val);
7024
7025         /* Get shmem offset */
7026         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7027         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7028
7029         /* Clear validity map flags */
7030         if (shmem > 0)
7031                 REG_WR(bp, shmem + validity_offset, 0);
7032 }
7033
7034 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
7035 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
7036
7037 /* Waits for MCP_ONE_TIMEOUT, or MCP_ONE_TIMEOUT*10 on slow
7038  * (emulation/FPGA) HW.
7039  *
7040  * @param bp Driver handle.
7041  */
7042 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7043 {
7044         /* special handling for emulation and FPGA,
7045            wait 10 times longer */
7046         if (CHIP_REV_IS_SLOW(bp))
7047                 msleep(MCP_ONE_TIMEOUT*10);
7048         else
7049                 msleep(MCP_ONE_TIMEOUT);
7050 }
7051
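/**
 * Completes the MCP reset: polls the shared memory validity map until
 * the MCP signs it again (i.e. the MCP is back up), then restores the
 * `magic' bit.
 *
 * @param bp Driver handle.
 * @param magic_val Old value of the `magic' bit.
 *
 * @return 0 on success, -ENOTTY if the shmem offset could not be read
 *         or the MCP did not come back up.
 */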
7052 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7053 {
7054         u32 shmem, cnt, validity_offset, val;
7055         int rc = 0;
7056
7057         msleep(100);
7058
7059         /* Get shmem offset */
7060         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7061         if (shmem == 0) {
7062                 BNX2X_ERR("Shmem base address read as 0 - failure\n");
7063                 rc = -ENOTTY;
7064                 goto exit_lbl;
7065         }
7066
7067         validity_offset = offsetof(struct shmem_region, validity_map[0]);
7068
7069         /* Wait for MCP to come up */
7070         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7071                 /* TBD: it's best to check the validity map of the last
7072                  * port; currently this checks port 0.
7073                  */
7074                 val = REG_RD(bp, shmem + validity_offset);
7075                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7076                    shmem + validity_offset, val);
7077
7078                 /* check that shared memory is valid. */
7079                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7080                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7081                         break;
7082
7083                 bnx2x_mcp_wait_one(bp);
7084         }
7085
7086         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7087
7088         /* Check that shared memory is valid. This indicates that MCP is up. */
7089         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7090             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7091                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7092                 rc = -ENOTTY;
7093                 goto exit_lbl;
7094         }
7095
7096 exit_lbl:
7097         /* Restore the `magic' bit value */
7098         if (!CHIP_IS_E1(bp))
7099                 bnx2x_clp_reset_done(bp, magic_val);
7100
7101         return rc;
7102 }
7103
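/**
 * Prepares the PXP for reset by clearing the RD_START_INIT,
 * RQ_RBC_DONE and RQ_CFG_DONE indications (nothing to do on E1).
 *
 * @param bp Driver handle.
 */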
7104 static void bnx2x_pxp_prep(struct bnx2x *bp)
7105 {
7106         if (!CHIP_IS_E1(bp)) {
7107                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7108                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7109                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7110                 mmiowb();
7111         }
7112 }
7113
7114 /*
7115  * Reset the whole chip except for:
7116  *      - PCIE core
7117  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7118  *              one reset bit)
7119  *      - IGU
7120  *      - MISC (including AEU)
7121  *      - GRC
7122  *      - RBCN, RBCP
7123  */
7124 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7125 {
7126         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7127
7128         not_reset_mask1 =
7129                 MISC_REGISTERS_RESET_REG_1_RST_HC |
7130                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7131                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7132
7133         not_reset_mask2 =
7134                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7135                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7136                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7137                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7138                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7139                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
7140                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7141                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7142
7143         reset_mask1 = 0xffffffff;
7144
7145         if (CHIP_IS_E1(bp))
7146                 reset_mask2 = 0xffff;
7147         else
7148                 reset_mask2 = 0x1ffff;
7149
7150         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7151                reset_mask1 & (~not_reset_mask1));
7152         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7153                reset_mask2 & (~not_reset_mask2));
7154
7155         barrier();
7156         mmiowb();
7157
7158         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7159         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7160         mmiowb();
7161 }
7162
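/**
 * Performs the "process kill" recovery flow: waits for the Tetris
 * buffer to empty, closes gates #2, #3 and #4, prepares the MCP and
 * PXP for reset, resets the chip (excluding the blocks listed above
 * bnx2x_process_kill_chip_reset()), waits for the MCP to come back up
 * and re-opens the gates.
 *
 * @param bp Driver handle.
 *
 * @return 0 on success, -EAGAIN on a timeout in one of the steps.
 */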
7163 static int bnx2x_process_kill(struct bnx2x *bp)
7164 {
7165         int cnt = 1000;
7166         u32 val = 0;
7167         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7168
7170         /* Empty the Tetris buffer, wait for 1s */
7171         do {
7172                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7173                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7174                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7175                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7176                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7177                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7178                     ((port_is_idle_0 & 0x1) == 0x1) &&
7179                     ((port_is_idle_1 & 0x1) == 0x1) &&
7180                     (pgl_exp_rom2 == 0xffffffff))
7181                         break;
7182                 msleep(1);
7183         } while (cnt-- > 0);
7184
7185         if (cnt < 0) {
7186                 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7187                           " are still"
7188                           " outstanding read requests after 1s!\n");
7189                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7190                           " port_is_idle_0=0x%08x,"
7191                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7192                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7193                           pgl_exp_rom2);
7194                 return -EAGAIN;
7195         }
7196
7197         barrier();
7198
7199         /* Close gates #2, #3 and #4 */
7200         bnx2x_set_234_gates(bp, true);
7201
7202         /* TBD: Indicate that "process kill" is in progress to MCP */
7203
7204         /* Clear "unprepared" bit */
7205         REG_WR(bp, MISC_REG_UNPREPARED, 0);
7206         barrier();
7207
7208         /* Make sure all is written to the chip before the reset */
7209         mmiowb();
7210
7211         /* Wait for 1ms to empty GLUE and PCI-E core queues,
7212          * PSWHST, GRC and PSWRD Tetris buffer.
7213          */
7214         msleep(1);
7215
7216         /* Prepare for chip reset: */
7217         /* MCP */
7218         bnx2x_reset_mcp_prep(bp, &val);
7219
7220         /* PXP */
7221         bnx2x_pxp_prep(bp);
7222         barrier();
7223
7224         /* reset the chip */
7225         bnx2x_process_kill_chip_reset(bp);
7226         barrier();
7227
7228         /* Recover after reset: */
7229         /* MCP */
7230         if (bnx2x_reset_mcp_comp(bp, val))
7231                 return -EAGAIN;
7232
7233         /* PXP */
7234         bnx2x_pxp_prep(bp);
7235
7236         /* Open the gates #2, #3 and #4 */
7237         bnx2x_set_234_gates(bp, false);
7238
7239         /* TBD: IGU/AEU preparation - bring back the AEU/IGU to a
7240          * reset state, re-enable attentions. */
7241
7242         return 0;
7243 }
7244
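/**
 * Recovery flow executed by the leader function: runs the "process
 * kill", clears the "reset is in progress" bit on success and always
 * gives up leadership and releases the leader HW lock.
 *
 * @param bp Driver handle.
 *
 * @return 0 on success, -EAGAIN if the "process kill" failed.
 */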
7245 static int bnx2x_leader_reset(struct bnx2x *bp)
7246 {
7247         int rc = 0;
7248         /* Try to recover after the failure */
7249         if (bnx2x_process_kill(bp)) {
7250                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7251                        bp->dev->name);
7252                 rc = -EAGAIN;
7253                 goto exit_leader_reset;
7254         }
7255
7256         /* Clear "reset is in progress" bit and update the driver state */
7257         bnx2x_set_reset_done(bp);
7258         bp->recovery_state = BNX2X_RECOVERY_DONE;
7259
7260 exit_leader_reset:
7261         bp->is_leader = 0;
7262         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7263         smp_wmb();
7264         return rc;
7265 }
7266
7267 /* Assumption: runs under rtnl lock. This, together with the fact
7268  * that it's called only from bnx2x_reset_task(), ensures that it
7269  * will never be called when netif_running(bp->dev) is false.
7270  */
7271 static void bnx2x_parity_recover(struct bnx2x *bp)
7272 {
7273         DP(NETIF_MSG_HW, "Handling parity\n");
7274         while (1) {
7275                 switch (bp->recovery_state) {
7276                 case BNX2X_RECOVERY_INIT:
7277                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7278                         /* Try to get a LEADER_LOCK HW lock */
7279                         if (bnx2x_trylock_hw_lock(bp,
7280                                 HW_LOCK_RESOURCE_RESERVED_08))
7281                                 bp->is_leader = 1;
7282
7283                         /* Stop the driver */
7284                         /* If interface has been removed - break */
7285                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7286                                 return;
7287
7288                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
7289                         /* Ensure "is_leader" and "recovery_state"
7290                          *  update values are seen on other CPUs
7291                          */
7292                         smp_wmb();
7293                         break;
7294
7295                 case BNX2X_RECOVERY_WAIT:
7296                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7297                         if (bp->is_leader) {
7298                                 u32 load_counter = bnx2x_get_load_cnt(bp);
7299                                 if (load_counter) {
7300                                         /* Wait until all other functions get
7301                                          * go down.
7302                                          */
7303                                         schedule_delayed_work(&bp->reset_task,
7304                                                                 HZ/10);
7305                                         return;
7306                                 } else {
7307                                         /* If all other functions are down,
7308                                          * try to bring the chip back to
7309                                          * normal. In any case this is an
7310                                          * exit point for the leader.
7311                                          */
7312                                         if (bnx2x_leader_reset(bp) ||
7313                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
7314                                                 printk(KERN_ERR "%s: Recovery "
7315                                                 "has failed. Power cycle is "
7316                                                 "needed.\n", bp->dev->name);
7317                                                 /* Disconnect this device */
7318                                                 netif_device_detach(bp->dev);
7319                                                 /* Block ifup for all functions
7320                                                  * of this ASIC until
7321                                                  * "process kill" or power
7322                                                  * cycle.
7323                                                  */
7324                                                 bnx2x_set_reset_in_progress(bp);
7325                                                 /* Shut down the power */
7326                                                 bnx2x_set_power_state(bp,
7327                                                                 PCI_D3hot);
7328                                                 return;
7329                                         }
7330
7331                                         return;
7332                                 }
7333                         } else { /* non-leader */
7334                                 if (!bnx2x_reset_is_done(bp)) {
7335                                         /* Try to get a LEADER_LOCK HW lock,
7336                                          * since the former leader may have
7337                                          * been unloaded by the user or may
7338                                          * have released leadership for
7339                                          * some other reason.
7340                                          */
7341                                         if (bnx2x_trylock_hw_lock(bp,
7342                                             HW_LOCK_RESOURCE_RESERVED_08)) {
7343                                                 /* I'm a leader now! Restart a
7344                                                  * switch case.
7345                                                  */
7346                                                 bp->is_leader = 1;
7347                                                 break;
7348                                         }
7349
7350                                         schedule_delayed_work(&bp->reset_task,
7351                                                                 HZ/10);
7352                                         return;
7353
7354                                 } else { /* A leader has completed
7355                                           * the "process kill". It's an exit
7356                                           * point for a non-leader.
7357                                           */
7358                                         bnx2x_nic_load(bp, LOAD_NORMAL);
7359                                         bp->recovery_state =
7360                                                 BNX2X_RECOVERY_DONE;
7361                                         smp_wmb();
7362                                         return;
7363                                 }
7364                         }
7365                 default:
7366                         return;
7367                 }
7368         }
7369 }
7370
7371 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7372  * scheduled on a general queue in order to prevent a dead lock.
7373  */
7374 static void bnx2x_reset_task(struct work_struct *work)
7375 {
7376         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7377
7378 #ifdef BNX2X_STOP_ON_ERROR
7379         BNX2X_ERR("reset task called but STOP_ON_ERROR defined, "
7380                   "so reset is not done to allow a debug dump,\n"
7381          KERN_ERR " you will need to reboot when done\n");
7382         return;
7383 #endif
7384
7385         rtnl_lock();
7386
7387         if (!netif_running(bp->dev))
7388                 goto reset_task_exit;
7389
7390         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7391                 bnx2x_parity_recover(bp);
7392         else {
7393                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7394                 bnx2x_nic_load(bp, LOAD_NORMAL);
7395         }
7396
7397 reset_task_exit:
7398         rtnl_unlock();
7399 }
7400
7401 /* end of nic load/unload */
7402
7403 /*
7404  * Init service functions
7405  */
7406
7407 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7408 {
7409         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7410         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7411         return base + (BP_ABS_FUNC(bp)) * stride;
7412 }
7413
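/**
 * Disables HC interrupts using the pretend mechanism: pretends to be
 * function 0, disables the interrupts in the "like-E1" mode and then
 * restores the original function.
 *
 * @param bp Driver handle.
 */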
7414 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
7415 {
7416         u32 reg = bnx2x_get_pretend_reg(bp);
7417
7418         /* Flush all outstanding writes */
7419         mmiowb();
7420
7421         /* Pretend to be function 0 */
7422         REG_WR(bp, reg, 0);
7423         REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
7424
7425         /* From now on we are in the "like-E1" mode */
7426         bnx2x_int_disable(bp);
7427
7428         /* Flush all outstanding writes */
7429         mmiowb();
7430
7431         /* Restore the original function */
7432         REG_WR(bp, reg, BP_ABS_FUNC(bp));
7433         REG_RD(bp, reg);
7434 }
7435
7436 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
7437 {
7438         if (CHIP_IS_E1(bp))
7439                 bnx2x_int_disable(bp);
7440         else
7441                 bnx2x_undi_int_disable_e1h(bp);
7442 }
7443
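/**
 * Detects a UNDI (pre-boot) driver that is still active - it leaves
 * the CID offset of the normal doorbell set to 0x7 - and unloads it:
 * sends the unload requests to the MCP (for the second port too, if
 * needed), disables interrupts, blocks input traffic, resets the
 * device while preserving the NIG port swap configuration and restores
 * the original pf_num and fw_seq.
 *
 * @param bp Driver handle.
 */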
7444 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7445 {
7446         u32 val;
7447
7448         /* Check if there is any driver already loaded */
7449         val = REG_RD(bp, MISC_REG_UNPREPARED);
7450         if (val == 0x1) {
7451                 /* Check if it is the UNDI driver: the UNDI driver
7452                  * initializes the CID offset for the normal doorbell to 0x7
7453                  */
7454                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7455                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7456                 if (val == 0x7) {
7457                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7458                         /* save our pf_num */
7459                         int orig_pf_num = bp->pf_num;
7460                         u32 swap_en;
7461                         u32 swap_val;
7462
7463                         /* clear the UNDI indication */
7464                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7465
7466                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7467
7468                         /* try to unload UNDI on port 0 */
7469                         bp->pf_num = 0;
7470                         bp->fw_seq =
7471                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7472                                 DRV_MSG_SEQ_NUMBER_MASK);
7473                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
7474
7475                         /* if UNDI is loaded on the other port */
7476                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7477
7478                                 /* send "DONE" for previous unload */
7479                                 bnx2x_fw_command(bp,
7480                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
7481
7482                                 /* unload UNDI on port 1 */
7483                                 bp->pf_num = 1;
7484                                 bp->fw_seq =
7485                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7486                                         DRV_MSG_SEQ_NUMBER_MASK);
7487                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7488
7489                                 bnx2x_fw_command(bp, reset_code, 0);
7490                         }
7491
7492                         /* now it's safe to release the lock */
7493                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7494
7495                         bnx2x_undi_int_disable(bp);
7496
7497                         /* close input traffic and wait for it to stop */
7498                         /* Do not rcv packets to BRB */
7499                         REG_WR(bp,
7500                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7501                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7502                         /* Do not direct rcv packets that are not for MCP to
7503                          * the BRB */
7504                         REG_WR(bp,
7505                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7506                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7507                         /* clear AEU */
7508                         REG_WR(bp,
7509                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7510                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7511                         msleep(10);
7512
7513                         /* save NIG port swap info */
7514                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7515                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7516                         /* reset device */
7517                         REG_WR(bp,
7518                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7519                                0xd3ffffff);
7520                         REG_WR(bp,
7521                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7522                                0x1403);
7523                         /* take the NIG out of reset and restore swap values */
7524                         REG_WR(bp,
7525                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7526                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7527                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7528                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7529
7530                         /* send unload done to the MCP */
7531                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7532
7533                         /* restore our func and fw_seq */
7534                         bp->pf_num = orig_pf_num;
7535                         bp->fw_seq =
7536                               (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
7537                                 DRV_MSG_SEQ_NUMBER_MASK);
7538                 } else
7539                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7540         }
7541 }
7542
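/**
 * Reads the HW information that is common to the whole chip: chip id
 * and revision, port mode, flash size, shmem bases, BC version, WoL
 * capability and part number.
 *
 * @param bp Driver handle.
 */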
7543 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7544 {
7545         u32 val, val2, val3, val4, id;
7546         u16 pmc;
7547
7548         /* Get the chip revision id and number. */
7549         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7550         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7551         id = ((val & 0xffff) << 16);
7552         val = REG_RD(bp, MISC_REG_CHIP_REV);
7553         id |= ((val & 0xf) << 12);
7554         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7555         id |= ((val & 0xff) << 4);
7556         val = REG_RD(bp, MISC_REG_BOND_ID);
7557         id |= (val & 0xf);
7558         bp->common.chip_id = id;
7559
7560         /* Set doorbell size */
7561         bp->db_size = (1 << BNX2X_DB_SHIFT);
7562
7563         if (CHIP_IS_E2(bp)) {
7564                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7565                 if ((val & 1) == 0)
7566                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7567                 else
7568                         val = (val >> 1) & 1;
7569                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7570                                                        "2_PORT_MODE");
7571                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7572                                                  CHIP_2_PORT_MODE;
7573
7574                 if (CHIP_MODE_IS_4_PORT(bp))
7575                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
7576                 else
7577                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
7578         } else {
7579                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7580                 bp->pfid = bp->pf_num;                  /* 0..7 */
7581         }
7582
7583         /*
7584          * set base FW non-default (fast path) status block id, this value is
7585          * used to initialize the fw_sb_id saved on the fp/queue structure to
7586          * determine the id used by the FW.
7587          */
7588         if (CHIP_IS_E1x(bp))
7589                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7590         else /* E2 */
7591                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7592
7593         bp->link_params.chip_id = bp->common.chip_id;
7594         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7595
7596         val = (REG_RD(bp, 0x2874) & 0x55);
7597         if ((bp->common.chip_id & 0x1) ||
7598             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7599                 bp->flags |= ONE_PORT_FLAG;
7600                 BNX2X_DEV_INFO("single port device\n");
7601         }
7602
7603         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7604         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7605                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7606         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7607                        bp->common.flash_size, bp->common.flash_size);
7608
7609         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7610         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7611                                         MISC_REG_GENERIC_CR_1 :
7612                                         MISC_REG_GENERIC_CR_0));
7613         bp->link_params.shmem_base = bp->common.shmem_base;
7614         bp->link_params.shmem2_base = bp->common.shmem2_base;
7615         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7616                        bp->common.shmem_base, bp->common.shmem2_base);
7617
7618         if (!bp->common.shmem_base) {
7619                 BNX2X_DEV_INFO("MCP not active\n");
7620                 bp->flags |= NO_MCP_FLAG;
7621                 return;
7622         }
7623
7624         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7625         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7626                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7627                 BNX2X_ERR("BAD MCP validity signature\n");
7628
7629         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7630         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7631
7632         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7633                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7634                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7635
7636         bp->link_params.feature_config_flags = 0;
7637         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7638         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7639                 bp->link_params.feature_config_flags |=
7640                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7641         else
7642                 bp->link_params.feature_config_flags &=
7643                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7644
7645         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7646         bp->common.bc_ver = val;
7647         BNX2X_DEV_INFO("bc_ver %X\n", val);
7648         if (val < BNX2X_BC_VER) {
7649                 /* for now only warn;
7650                  * later we might need to enforce this */
7651                 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7652                           "please upgrade BC\n", BNX2X_BC_VER, val);
7653         }
7654         bp->link_params.feature_config_flags |=
7655                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
7656                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7657
7658         bp->link_params.feature_config_flags |=
7659                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7660                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
7661
7662         if (BP_E1HVN(bp) == 0) {
7663                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7664                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7665         } else {
7666                 /* no WOL capability for E1HVN != 0 */
7667                 bp->flags |= NO_WOL_FLAG;
7668         }
7669         BNX2X_DEV_INFO("%sWoL capable\n",
7670                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7671
7672         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7673         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7674         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7675         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7676
7677         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7678                  val, val2, val3, val4);
7679 }
7680
7681 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7682 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7683
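/**
 * Derives igu_dsb_id, igu_base_sb and igu_sb_cnt for this function:
 * computed directly in IGU backward compatible mode, otherwise read
 * from the IGU CAM mapping memory.
 *
 * @param bp Driver handle.
 */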
7684 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7685 {
7686         int pfid = BP_FUNC(bp);
7687         int vn = BP_E1HVN(bp);
7688         int igu_sb_id;
7689         u32 val;
7690         u8 fid;
7691
7692         bp->igu_base_sb = 0xff;
7693         bp->igu_sb_cnt = 0;
7694         if (CHIP_INT_MODE_IS_BC(bp)) {
7695                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7696                                        bp->l2_cid_count);
7697
7698                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7699                         FP_SB_MAX_E1x;
7700
7701                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
7702                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7703
7704                 return;
7705         }
7706
7707         /* IGU in normal mode - read CAM */
7708         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7709              igu_sb_id++) {
7710                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7711                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7712                         continue;
7713                 fid = IGU_FID(val);
7714                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7715                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7716                                 continue;
7717                         if (IGU_VEC(val) == 0)
7718                                 /* default status block */
7719                                 bp->igu_dsb_id = igu_sb_id;
7720                         else {
7721                                 if (bp->igu_base_sb == 0xff)
7722                                         bp->igu_base_sb = igu_sb_id;
7723                                 bp->igu_sb_cnt++;
7724                         }
7725                 }
7726         }
7727         bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7728         if (bp->igu_sb_cnt == 0)
7729                 BNX2X_ERR("CAM configuration error\n");
7730 }
7731
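/**
 * Aggregates the supported link attributes of all the configured PHYs
 * of this port, reads the PHY address according to the switch type and
 * masks the result according to the NVRAM speed capability masks.
 *
 * @param bp Driver handle.
 * @param switch_cfg SWITCH_CFG_1G or SWITCH_CFG_10G.
 */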
7732 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7733                                                     u32 switch_cfg)
7734 {
7735         int cfg_size = 0, idx, port = BP_PORT(bp);
7736
7737         /* Aggregation of supported attributes of all external phys */
7738         bp->port.supported[0] = 0;
7739         bp->port.supported[1] = 0;
7740         switch (bp->link_params.num_phys) {
7741         case 1:
7742                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7743                 cfg_size = 1;
7744                 break;
7745         case 2:
7746                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7747                 cfg_size = 1;
7748                 break;
7749         case 3:
7750                 if (bp->link_params.multi_phy_config &
7751                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7752                         bp->port.supported[1] =
7753                                 bp->link_params.phy[EXT_PHY1].supported;
7754                         bp->port.supported[0] =
7755                                 bp->link_params.phy[EXT_PHY2].supported;
7756                 } else {
7757                         bp->port.supported[0] =
7758                                 bp->link_params.phy[EXT_PHY1].supported;
7759                         bp->port.supported[1] =
7760                                 bp->link_params.phy[EXT_PHY2].supported;
7761                 }
7762                 cfg_size = 2;
7763                 break;
7764         }
7765
7766         if (!(bp->port.supported[0] || bp->port.supported[1])) {
7767                 BNX2X_ERR("NVRAM config error. BAD phy config. "
7768                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
7769                            SHMEM_RD(bp,
7770                            dev_info.port_hw_config[port].external_phy_config),
7771                            SHMEM_RD(bp,
7772                            dev_info.port_hw_config[port].external_phy_config2));
7773                 return;
7774         }
7775
7776         switch (switch_cfg) {
7777         case SWITCH_CFG_1G:
7778                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7779                                            port*0x10);
7780                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7781                 break;
7782
7783         case SWITCH_CFG_10G:
7784                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7785                                            port*0x18);
7786                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7787                 break;
7788
7789         default:
7790                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7791                           bp->port.link_config[0]);
7792                 return;
7793         }
7794         /* mask what we support according to speed_cap_mask per configuration */
7795         for (idx = 0; idx < cfg_size; idx++) {
7796                 if (!(bp->link_params.speed_cap_mask[idx] &
7797                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7798                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
7799
7800                 if (!(bp->link_params.speed_cap_mask[idx] &
7801                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7802                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
7803
7804                 if (!(bp->link_params.speed_cap_mask[idx] &
7805                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7806                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
7807
7808                 if (!(bp->link_params.speed_cap_mask[idx] &
7809                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7810                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
7811
7812                 if (!(bp->link_params.speed_cap_mask[idx] &
7813                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7814                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
7815                                                      SUPPORTED_1000baseT_Full);
7816
7817                 if (!(bp->link_params.speed_cap_mask[idx] &
7818                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7819                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
7820
7821                 if (!(bp->link_params.speed_cap_mask[idx] &
7822                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7823                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
7824
7825         }
7826
7827         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7828                        bp->port.supported[1]);
7829 }
7830
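/**
 * Translates the NVRAM link_config of each PHY configuration into the
 * requested line speed, duplex, flow control and advertised masks,
 * falling back to autoneg when an invalid speed is configured.
 *
 * @param bp Driver handle.
 */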
7831 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7832 {
7833         u32 link_config, idx, cfg_size = 0;
7834         bp->port.advertising[0] = 0;
7835         bp->port.advertising[1] = 0;
7836         switch (bp->link_params.num_phys) {
7837         case 1:
7838         case 2:
7839                 cfg_size = 1;
7840                 break;
7841         case 3:
7842                 cfg_size = 2;
7843                 break;
7844         }
7845         for (idx = 0; idx < cfg_size; idx++) {
7846                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7847                 link_config = bp->port.link_config[idx];
7848                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7849                 case PORT_FEATURE_LINK_SPEED_AUTO:
7850                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7851                                 bp->link_params.req_line_speed[idx] =
7852                                         SPEED_AUTO_NEG;
7853                                 bp->port.advertising[idx] |=
7854                                         bp->port.supported[idx];
7855                         } else {
7856                                 /* force 10G, no AN */
7857                                 bp->link_params.req_line_speed[idx] =
7858                                         SPEED_10000;
7859                                 bp->port.advertising[idx] |=
7860                                         (ADVERTISED_10000baseT_Full |
7861                                          ADVERTISED_FIBRE);
7862                                 continue;
7863                         }
7864                         break;
7865
7866                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7867                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7868                                 bp->link_params.req_line_speed[idx] =
7869                                         SPEED_10;
7870                                 bp->port.advertising[idx] |=
7871                                         (ADVERTISED_10baseT_Full |
7872                                          ADVERTISED_TP);
7873                         } else {
7874                                 BNX2X_ERROR("NVRAM config error. "
7875                                             "Invalid link_config 0x%x"
7876                                             "  speed_cap_mask 0x%x\n",
7877                                             link_config,
7878                                     bp->link_params.speed_cap_mask[idx]);
7879                                 return;
7880                         }
7881                         break;
7882
7883                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7884                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7885                                 bp->link_params.req_line_speed[idx] =
7886                                         SPEED_10;
7887                                 bp->link_params.req_duplex[idx] =
7888                                         DUPLEX_HALF;
7889                                 bp->port.advertising[idx] |=
7890                                         (ADVERTISED_10baseT_Half |
7891                                          ADVERTISED_TP);
7892                         } else {
7893                                 BNX2X_ERROR("NVRAM config error. "
7894                                             "Invalid link_config 0x%x"
7895                                             "  speed_cap_mask 0x%x\n",
7896                                             link_config,
7897                                           bp->link_params.speed_cap_mask[idx]);
7898                                 return;
7899                         }
7900                         break;
7901
7902                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7903                         if (bp->port.supported[idx] &
7904                             SUPPORTED_100baseT_Full) {
7905                                 bp->link_params.req_line_speed[idx] =
7906                                         SPEED_100;
7907                                 bp->port.advertising[idx] |=
7908                                         (ADVERTISED_100baseT_Full |
7909                                          ADVERTISED_TP);
7910                         } else {
7911                                 BNX2X_ERROR("NVRAM config error. "
7912                                             "Invalid link_config 0x%x"
7913                                             "  speed_cap_mask 0x%x\n",
7914                                             link_config,
7915                                           bp->link_params.speed_cap_mask[idx]);
7916                                 return;
7917                         }
7918                         break;
7919
7920                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7921                         if (bp->port.supported[idx] &
7922                             SUPPORTED_100baseT_Half) {
7923                                 bp->link_params.req_line_speed[idx] =
7924                                                                 SPEED_100;
7925                                 bp->link_params.req_duplex[idx] =
7926                                                                 DUPLEX_HALF;
7927                                 bp->port.advertising[idx] |=
7928                                         (ADVERTISED_100baseT_Half |
7929                                          ADVERTISED_TP);
7930                         } else {
7931                                 BNX2X_ERROR("NVRAM config error. "
7932                                     "Invalid link_config 0x%x"
7933                                     "  speed_cap_mask 0x%x\n",
7934                                     link_config,
7935                                     bp->link_params.speed_cap_mask[idx]);
7936                                 return;
7937                         }
7938                         break;
7939
7940                 case PORT_FEATURE_LINK_SPEED_1G:
7941                         if (bp->port.supported[idx] &
7942                             SUPPORTED_1000baseT_Full) {
7943                                 bp->link_params.req_line_speed[idx] =
7944                                         SPEED_1000;
7945                                 bp->port.advertising[idx] |=
7946                                         (ADVERTISED_1000baseT_Full |
7947                                          ADVERTISED_TP);
7948                         } else {
7949                                 BNX2X_ERROR("NVRAM config error. "
7950                                     "Invalid link_config 0x%x"
7951                                     "  speed_cap_mask 0x%x\n",
7952                                     link_config,
7953                                     bp->link_params.speed_cap_mask[idx]);
7954                                 return;
7955                         }
7956                         break;
7957
7958                 case PORT_FEATURE_LINK_SPEED_2_5G:
7959                         if (bp->port.supported[idx] &
7960                             SUPPORTED_2500baseX_Full) {
7961                                 bp->link_params.req_line_speed[idx] =
7962                                         SPEED_2500;
7963                                 bp->port.advertising[idx] |=
7964                                         (ADVERTISED_2500baseX_Full |
7965                                                 ADVERTISED_TP);
7966                         } else {
7967                                 BNX2X_ERROR("NVRAM config error. "
7968                                     "Invalid link_config 0x%x"
7969                                     "  speed_cap_mask 0x%x\n",
7970                                     link_config,
7971                                     bp->link_params.speed_cap_mask[idx]);
7972                                 return;
7973                         }
7974                         break;
7975
7976                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7977                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7978                 case PORT_FEATURE_LINK_SPEED_10G_KR:
7979                         if (bp->port.supported[idx] &
7980                             SUPPORTED_10000baseT_Full) {
7981                                 bp->link_params.req_line_speed[idx] =
7982                                         SPEED_10000;
7983                                 bp->port.advertising[idx] |=
7984                                         (ADVERTISED_10000baseT_Full |
7985                                                 ADVERTISED_FIBRE);
7986                         } else {
7987                                 BNX2X_ERROR("NVRAM config error. "
7988                                     "Invalid link_config 0x%x"
7989                                     "  speed_cap_mask 0x%x\n",
7990                                     link_config,
7991                                     bp->link_params.speed_cap_mask[idx]);
7992                                 return;
7993                         }
7994                         break;
7995
7996                 default:
7997                         BNX2X_ERROR("NVRAM config error. "
7998                                     "BAD link speed link_config 0x%x\n",
7999                                     link_config);
8000                         bp->link_params.req_line_speed[idx] =
8001                                                 SPEED_AUTO_NEG;
8002                         bp->port.advertising[idx] =
8003                                         bp->port.supported[idx];
8004                         break;
8005                 }
8006
8007                 bp->link_params.req_flow_ctrl[idx] = (link_config &
8008                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8009                 if ((bp->link_params.req_flow_ctrl[idx] ==
8010                      BNX2X_FLOW_CTRL_AUTO) &&
8011                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8012                         bp->link_params.req_flow_ctrl[idx] =
8013                                 BNX2X_FLOW_CTRL_NONE;
8014                 }
8015
8016                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
8017                                " 0x%x advertising 0x%x\n",
8018                                bp->link_params.req_line_speed[idx],
8019                                bp->link_params.req_duplex[idx],
8020                                bp->link_params.req_flow_ctrl[idx],
8021                                bp->port.advertising[idx]);
8022         }
8023 }
8024
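/**
 * Assembles a MAC address buffer out of the two shmem words: mac_hi
 * supplies the upper 2 bytes and mac_lo the lower 4 bytes, both
 * converted to big endian (network order).
 *
 * @param mac_buf Buffer of at least ETH_ALEN bytes.
 * @param mac_lo Lower 4 bytes of the MAC address.
 * @param mac_hi Upper 2 bytes of the MAC address.
 */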
8025 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8026 {
8027         mac_hi = cpu_to_be16(mac_hi);
8028         mac_lo = cpu_to_be32(mac_lo);
8029         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8030         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8031 }
8032
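/**
 * Reads the per-port HW information from the shmem: lane config, speed
 * capability masks, link configs, multi-PHY config, the default WoL
 * state, the MDIO address and the MAC address(es).
 *
 * @param bp Driver handle.
 */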
8033 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8034 {
8035         int port = BP_PORT(bp);
8036         u32 val, val2;
8037         u32 config;
8038         u32 ext_phy_type, ext_phy_config;
8039
8040         bp->link_params.bp = bp;
8041         bp->link_params.port = port;
8042
8043         bp->link_params.lane_config =
8044                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8045
8046         bp->link_params.speed_cap_mask[0] =
8047                 SHMEM_RD(bp,
8048                          dev_info.port_hw_config[port].speed_capability_mask);
8049         bp->link_params.speed_cap_mask[1] =
8050                 SHMEM_RD(bp,
8051                          dev_info.port_hw_config[port].speed_capability_mask2);
8052         bp->port.link_config[0] =
8053                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8054
8055         bp->port.link_config[1] =
8056                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
8057
8058         bp->link_params.multi_phy_config =
8059                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
8060         /* If the device is capable of WoL, set the default state according
8061          * to the HW
8062          */
8063         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8064         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8065                    (config & PORT_FEATURE_WOL_ENABLED));
8066
8067         BNX2X_DEV_INFO("lane_config 0x%08x  "
8068                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
8069                        bp->link_params.lane_config,
8070                        bp->link_params.speed_cap_mask[0],
8071                        bp->port.link_config[0]);
8072
8073         bp->link_params.switch_cfg = (bp->port.link_config[0] &
8074                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8075         bnx2x_phy_probe(&bp->link_params);
8076         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8077
8078         bnx2x_link_settings_requested(bp);
8079
8080         /*
8081          * If connected directly, work with the internal PHY, otherwise, work
8082          * with the external PHY
8083          */
8084         ext_phy_config =
8085                 SHMEM_RD(bp,
8086                          dev_info.port_hw_config[port].external_phy_config);
8087         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
8088         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8089                 bp->mdio.prtad = bp->port.phy_addr;
8090
8091         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8092                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8093                 bp->mdio.prtad =
8094                         XGXS_EXT_PHY_ADDR(ext_phy_config);
8095
8096         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8097         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8098         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8099         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8100         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8101
8102 #ifdef BCM_CNIC
8103         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8104         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8105         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8106 #endif
8107 }
8108
8109 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8110 {
8111         int func = BP_ABS_FUNC(bp);
8112         int vn;
8113         u32 val, val2;
8114         int rc = 0;
8115
8116         bnx2x_get_common_hwinfo(bp);
8117
8118         if (CHIP_IS_E1x(bp)) {
8119                 bp->common.int_block = INT_BLOCK_HC;
8120
8121                 bp->igu_dsb_id = DEF_SB_IGU_ID;
8122                 bp->igu_base_sb = 0;
8123                 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8124         } else {
8125                 bp->common.int_block = INT_BLOCK_IGU;
8126                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8127                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8128                         DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8129                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8130                 } else
8131                         DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8132
8133                 bnx2x_get_igu_cam_info(bp);
8134
8135         }
8136         DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
8137                              bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8138
8139         /*
8140          * Initialize MF configuration
8141          */
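        /* Hedged overview (editor's note): in multi-function (MF) mode
         * several PCI functions share one physical port, and the MCP
         * publishes per-function parameters in the mf_cfg shmem region.
         * A non-default E1HOV tag both marks MF mode and supplies the
         * outer-VLAN ID (bp->mf_ov) that separates the functions'
         * traffic on the wire.
         */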
8142
8143         bp->mf_ov = 0;
8144         bp->mf_mode = 0;
8145         vn = BP_E1HVN(bp);
8146         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8147                 if (SHMEM2_HAS(bp, mf_cfg_addr))
8148                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8149                 else
8150                         bp->common.mf_cfg_base = bp->common.shmem_base +
8151                                 offsetof(struct shmem_region, func_mb) +
8152                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8153                 bp->mf_config[vn] =
8154                         MF_CFG_RD(bp, func_mf_config[func].config);
8155
8156                 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8157                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8158                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8159                         bp->mf_mode = 1;
8160                 BNX2X_DEV_INFO("%s function mode\n",
8161                                IS_MF(bp) ? "multi" : "single");
8162
8163                 if (IS_MF(bp)) {
8164                         val = (MF_CFG_RD(bp, func_mf_config[func].
8165                                                                 e1hov_tag) &
8166                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8167                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8168                                 bp->mf_ov = val;
8169                                 BNX2X_DEV_INFO("MF OV for func %d is %d "
8170                                                "(0x%04x)\n",
8171                                                func, bp->mf_ov, bp->mf_ov);
8172                         } else {
8173                                 BNX2X_ERROR("No valid MF OV for func %d,"
8174                                             "  aborting\n", func);
8175                                 rc = -EPERM;
8176                         }
8177                 } else {
8178                         if (BP_VN(bp)) {
8179                                 BNX2X_ERROR("VN %d in single function mode,"
8180                                             "  aborting\n", BP_E1HVN(bp));
8181                                 rc = -EPERM;
8182                         }
8183                 }
8184         }
8185
8186         /* adjust igu_sb_cnt to MF for E1x */
8187         if (CHIP_IS_E1x(bp) && IS_MF(bp))
8188                 bp->igu_sb_cnt /= E1HVN_MAX;
8189
8190         /*
8191          * adjust E2 sb count: to be removed once the FW supports
8192          * more than 16 L2 clients
8193          */
8194 #define MAX_L2_CLIENTS                          16
8195         if (CHIP_IS_E2(bp))
8196                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8197                                        MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8198
8199         if (!BP_NOMCP(bp)) {
8200                 bnx2x_get_port_hwinfo(bp);
8201
8202                 bp->fw_seq =
8203                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8204                          DRV_MSG_SEQ_NUMBER_MASK);
8205                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8206         }
8207
8208         if (IS_MF(bp)) {
8209                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8210                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8211                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8212                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8213                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8214                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8215                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8216                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8217                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8218                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8219                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8220                                ETH_ALEN);
8221                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8222                                ETH_ALEN);
8223                 }
8224
8225                 return rc;
8226         }
8227
8228         if (BP_NOMCP(bp)) {
8229                 /* only supposed to happen on emulation/FPGA */
8230                 BNX2X_ERROR("warning: random MAC workaround active\n");
8231                 random_ether_addr(bp->dev->dev_addr);
8232                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8233         }
8234
8235         return rc;
8236 }
8237
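/* Editor's sketch of the VPD walk below (hedged, per the PCI VPD spec
 * rather than anything driver-specific): the large-resource RO tag is
 * followed by keyword fields, each with a 3-byte header (2-byte keyword
 * plus length).  The "MN" (MFR_ID) keyword is checked against the Dell
 * vendor ID; when it matches, the "V0" (VENDOR0) keyword carries an OEM
 * firmware-version string that is copied into bp->fw_ver.
 */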
8238 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8239 {
8240         int cnt, i, block_end, rodi;
8241         char vpd_data[BNX2X_VPD_LEN+1];
8242         char str_id_reg[VENDOR_ID_LEN+1];
8243         char str_id_cap[VENDOR_ID_LEN+1];
8244         u8 len;
8245
8246         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8247         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8248
8249         if (cnt < BNX2X_VPD_LEN)
8250                 goto out_not_found;
8251
8252         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8253                              PCI_VPD_LRDT_RO_DATA);
8254         if (i < 0)
8255                 goto out_not_found;
8256
8257
8258         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8259                     pci_vpd_lrdt_size(&vpd_data[i]);
8260
8261         i += PCI_VPD_LRDT_TAG_SIZE;
8262
8263         if (block_end > BNX2X_VPD_LEN)
8264                 goto out_not_found;
8265
8266         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8267                                    PCI_VPD_RO_KEYWORD_MFR_ID);
8268         if (rodi < 0)
8269                 goto out_not_found;
8270
8271         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8272
8273         if (len != VENDOR_ID_LEN)
8274                 goto out_not_found;
8275
8276         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8277
8278         /* vendor specific info */
8279         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8280         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8281         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8282             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8283
8284                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8285                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
8286                 if (rodi >= 0) {
8287                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
8288
8289                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8290
8291                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8292                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8293                                 bp->fw_ver[len] = ' ';
8294                         }
8295                 }
8296                 return;
8297         }
8298 out_not_found:
8299         return;
8300 }
8301
8302 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8303 {
8304         int func;
8305         int timer_interval;
8306         int rc;
8307
8308         /* Disable interrupt handling until HW is initialized */
8309         atomic_set(&bp->intr_sem, 1);
8310         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8311
8312         mutex_init(&bp->port.phy_mutex);
8313         mutex_init(&bp->fw_mb_mutex);
8314         spin_lock_init(&bp->stats_lock);
8315 #ifdef BCM_CNIC
8316         mutex_init(&bp->cnic_mutex);
8317 #endif
8318
8319         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8320         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8321
8322         rc = bnx2x_get_hwinfo(bp);
8323
8324         if (!rc)
8325                 rc = bnx2x_alloc_mem_bp(bp);
8326
8327         bnx2x_read_fwinfo(bp);
8328
8329         func = BP_FUNC(bp);
8330
8331         /* need to reset chip if undi was active */
8332         if (!BP_NOMCP(bp))
8333                 bnx2x_undi_unload(bp);
8334
8335         if (CHIP_REV_IS_FPGA(bp))
8336                 dev_err(&bp->pdev->dev, "FPGA detected\n");
8337
8338         if (BP_NOMCP(bp) && (func == 0))
8339                 dev_err(&bp->pdev->dev, "MCP disabled, "
8340                                         "must load devices in order!\n");
8341
8342         /* Set multi queue mode */
8343         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8344             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8345                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8346                                         "requested is not MSI-X\n");
8347                 multi_mode = ETH_RSS_MODE_DISABLED;
8348         }
8349         bp->multi_mode = multi_mode;
8350         bp->int_mode = int_mode;
8351
8352         bp->dev->features |= NETIF_F_GRO;
8353
8354         /* Set TPA flags */
8355         if (disable_tpa) {
8356                 bp->flags &= ~TPA_ENABLE_FLAG;
8357                 bp->dev->features &= ~NETIF_F_LRO;
8358         } else {
8359                 bp->flags |= TPA_ENABLE_FLAG;
8360                 bp->dev->features |= NETIF_F_LRO;
8361         }
8362         bp->disable_tpa = disable_tpa;
8363
8364         if (CHIP_IS_E1(bp))
8365                 bp->dropless_fc = 0;
8366         else
8367                 bp->dropless_fc = dropless_fc;
8368
8369         bp->mrrs = mrrs;
8370
8371         bp->tx_ring_size = MAX_TX_AVAIL;
8372
8373         bp->rx_csum = 1;
8374
8375         /* make sure that the numbers are in the right granularity */
8376         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8377         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
8378
8379         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8380         bp->current_interval = (poll ? poll : timer_interval);
8381
8382         init_timer(&bp->timer);
8383         bp->timer.expires = jiffies + bp->current_interval;
8384         bp->timer.data = (unsigned long) bp;
8385         bp->timer.function = bnx2x_timer;
8386
8387         return rc;
8388 }
8389
8390
8391 /****************************************************************************
8392 * General service functions
8393 ****************************************************************************/
8394
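/* Recovery-at-open sketch (editor's note, hedged): if a prior parity event
 * left "reset done" cleared, the first function to load (load count 0)
 * tries to become the recovery leader by taking the RESERVED_08 HW lock
 * and running bnx2x_leader_reset(); any other function backs off with
 * -EAGAIN and drops to D3hot until recovery completes.
 */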
8395 /* called with rtnl_lock */
8396 static int bnx2x_open(struct net_device *dev)
8397 {
8398         struct bnx2x *bp = netdev_priv(dev);
8399
8400         netif_carrier_off(dev);
8401
8402         bnx2x_set_power_state(bp, PCI_D0);
8403
8404         if (!bnx2x_reset_is_done(bp)) {
8405                 do {
8406                         /* Reset the MCP mailbox sequence if there is an
8407                          * ongoing recovery
8408                          */
8409                         bp->fw_seq = 0;
8410
8411                         /* If it's the first function to load and reset done
8412                          * is still not cleared, recovery may still be needed.
8413                          * We don't check the attention state here because it
8414                          * may have already been cleared by a "common" reset,
8415                          * but we shall proceed with "process kill" anyway.
8416                          */
8417                         if ((bnx2x_get_load_cnt(bp) == 0) &&
8418                                 bnx2x_trylock_hw_lock(bp,
8419                                 HW_LOCK_RESOURCE_RESERVED_08) &&
8420                                 (!bnx2x_leader_reset(bp))) {
8421                                 DP(NETIF_MSG_HW, "Recovered in open\n");
8422                                 break;
8423                         }
8424
8425                         bnx2x_set_power_state(bp, PCI_D3hot);
8426
8427                         printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8428                         " completed yet. Try again later. If you still see this"
8429                         " message after a few retries then a power cycle is"
8430                         " required.\n", bp->dev->name);
8431
8432                         return -EAGAIN;
8433                 } while (0);
8434         }
8435
8436         bp->recovery_state = BNX2X_RECOVERY_DONE;
8437
8438         return bnx2x_nic_load(bp, LOAD_OPEN);
8439 }
8440
8441 /* called with rtnl_lock */
8442 static int bnx2x_close(struct net_device *dev)
8443 {
8444         struct bnx2x *bp = netdev_priv(dev);
8445
8446         /* Unload the driver, release IRQs */
8447         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8448         bnx2x_set_power_state(bp, PCI_D3hot);
8449
8450         return 0;
8451 }
8452
8453 /* called with netif_tx_lock from dev_mcast.c */
8454 void bnx2x_set_rx_mode(struct net_device *dev)
8455 {
8456         struct bnx2x *bp = netdev_priv(dev);
8457         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8458         int port = BP_PORT(bp);
8459
8460         if (bp->state != BNX2X_STATE_OPEN) {
8461                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8462                 return;
8463         }
8464
8465         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8466
8467         if (dev->flags & IFF_PROMISC)
8468                 rx_mode = BNX2X_RX_MODE_PROMISC;
8469         else if ((dev->flags & IFF_ALLMULTI) ||
8470                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8471                   CHIP_IS_E1(bp)))
8472                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8473         else { /* some multicasts */
8474                 if (CHIP_IS_E1(bp)) {
8475                         /*
8476                          * Set the MC list without waiting: waiting implies
8477                          * sleeping, and set_rx_mode can be invoked from a
8478                          * non-sleepable context.
8479                          */
8480                         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8481                                      BNX2X_MAX_EMUL_MULTI*(1 + port) :
8482                                      BNX2X_MAX_MULTICAST*(1 + port));
8483
8484                         bnx2x_set_e1_mc_list(bp, offset);
8485                 } else { /* E1H */
8486                         /* Accept one or more multicasts */
8487                         struct netdev_hw_addr *ha;
8488                         u32 mc_filter[MC_HASH_SIZE];
8489                         u32 crc, bit, regidx;
8490                         int i;
8491
8492                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8493
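                        /* Hash sketch (editor's note, hedged): the chip keeps
                         * a multicast filter of MC_HASH_SIZE 32-bit registers
                         * (8 registers, i.e. 256 bits, assuming the usual
                         * definition).  Bits 31:24 of the crc32c of each MAC
                         * pick one filter bit; e.g. crc >> 24 == 0x4b selects
                         * bit 11 of register 2 (0x4b >> 5 == 2,
                         * 0x4b & 0x1f == 11).
                         */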
8494                         netdev_for_each_mc_addr(ha, dev) {
8495                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8496                                    bnx2x_mc_addr(ha));
8497
8498                                 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8499                                                 ETH_ALEN);
8500                                 bit = (crc >> 24) & 0xff;
8501                                 regidx = bit >> 5;
8502                                 bit &= 0x1f;
8503                                 mc_filter[regidx] |= (1 << bit);
8504                         }
8505
8506                         for (i = 0; i < MC_HASH_SIZE; i++)
8507                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8508                                        mc_filter[i]);
8509                 }
8510         }
8511
8512         bp->rx_mode = rx_mode;
8513         bnx2x_set_storm_rx_mode(bp);
8514 }
8515
8516 /* called with rtnl_lock */
8517 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8518                            int devad, u16 addr)
8519 {
8520         struct bnx2x *bp = netdev_priv(netdev);
8521         u16 value;
8522         int rc;
8523
8524         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8525            prtad, devad, addr);
8526
8527         /* The HW expects different devad if CL22 is used */
8528         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8529
8530         bnx2x_acquire_phy_lock(bp);
8531         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
8532         bnx2x_release_phy_lock(bp);
8533         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8534
8535         if (!rc)
8536                 rc = value;
8537         return rc;
8538 }
8539
8540 /* called with rtnl_lock */
8541 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8542                             u16 addr, u16 value)
8543 {
8544         struct bnx2x *bp = netdev_priv(netdev);
8545         int rc;
8546
8547         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8548                            " value 0x%x\n", prtad, devad, addr, value);
8549
8550         /* The HW expects different devad if CL22 is used */
8551         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8552
8553         bnx2x_acquire_phy_lock(bp);
8554         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
8555         bnx2x_release_phy_lock(bp);
8556         return rc;
8557 }
8558
8559 /* called with rtnl_lock */
8560 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8561 {
8562         struct bnx2x *bp = netdev_priv(dev);
8563         struct mii_ioctl_data *mdio = if_mii(ifr);
8564
8565         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8566            mdio->phy_id, mdio->reg_num, mdio->val_in);
8567
8568         if (!netif_running(dev))
8569                 return -EAGAIN;
8570
8571         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
8572 }
8573
8574 #ifdef CONFIG_NET_POLL_CONTROLLER
8575 static void poll_bnx2x(struct net_device *dev)
8576 {
8577         struct bnx2x *bp = netdev_priv(dev);
8578
8579         disable_irq(bp->pdev->irq);
8580         bnx2x_interrupt(bp->pdev->irq, dev);
8581         enable_irq(bp->pdev->irq);
8582 }
8583 #endif
8584
8585 static const struct net_device_ops bnx2x_netdev_ops = {
8586         .ndo_open               = bnx2x_open,
8587         .ndo_stop               = bnx2x_close,
8588         .ndo_start_xmit         = bnx2x_start_xmit,
8589         .ndo_set_multicast_list = bnx2x_set_rx_mode,
8590         .ndo_set_mac_address    = bnx2x_change_mac_addr,
8591         .ndo_validate_addr      = eth_validate_addr,
8592         .ndo_do_ioctl           = bnx2x_ioctl,
8593         .ndo_change_mtu         = bnx2x_change_mtu,
8594         .ndo_tx_timeout         = bnx2x_tx_timeout,
8595 #ifdef BCM_VLAN
8596         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
8597 #endif
8598 #ifdef CONFIG_NET_POLL_CONTROLLER
8599         .ndo_poll_controller    = poll_bnx2x,
8600 #endif
8601 };
8602
8603 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8604                                     struct net_device *dev)
8605 {
8606         struct bnx2x *bp;
8607         int rc;
8608
8609         SET_NETDEV_DEV(dev, &pdev->dev);
8610         bp = netdev_priv(dev);
8611
8612         bp->dev = dev;
8613         bp->pdev = pdev;
8614         bp->flags = 0;
8615         bp->pf_num = PCI_FUNC(pdev->devfn);
8616
8617         rc = pci_enable_device(pdev);
8618         if (rc) {
8619                 dev_err(&bp->pdev->dev,
8620                         "Cannot enable PCI device, aborting\n");
8621                 goto err_out;
8622         }
8623
8624         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8625                 dev_err(&bp->pdev->dev,
8626                         "Cannot find PCI device base address, aborting\n");
8627                 rc = -ENODEV;
8628                 goto err_out_disable;
8629         }
8630
8631         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8632                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8633                        " base address, aborting\n");
8634                 rc = -ENODEV;
8635                 goto err_out_disable;
8636         }
8637
8638         if (atomic_read(&pdev->enable_cnt) == 1) {
8639                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8640                 if (rc) {
8641                         dev_err(&bp->pdev->dev,
8642                                 "Cannot obtain PCI resources, aborting\n");
8643                         goto err_out_disable;
8644                 }
8645
8646                 pci_set_master(pdev);
8647                 pci_save_state(pdev);
8648         }
8649
8650         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8651         if (bp->pm_cap == 0) {
8652                 dev_err(&bp->pdev->dev,
8653                         "Cannot find power management capability, aborting\n");
8654                 rc = -EIO;
8655                 goto err_out_release;
8656         }
8657
8658         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8659         if (bp->pcie_cap == 0) {
8660                 dev_err(&bp->pdev->dev,
8661                         "Cannot find PCI Express capability, aborting\n");
8662                 rc = -EIO;
8663                 goto err_out_release;
8664         }
8665
8666         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
8667                 bp->flags |= USING_DAC_FLAG;
8668                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
8669                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8670                                " failed, aborting\n");
8671                         rc = -EIO;
8672                         goto err_out_release;
8673                 }
8674
8675         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
8676                 dev_err(&bp->pdev->dev,
8677                         "System does not support DMA, aborting\n");
8678                 rc = -EIO;
8679                 goto err_out_release;
8680         }
8681
8682         dev->mem_start = pci_resource_start(pdev, 0);
8683         dev->base_addr = dev->mem_start;
8684         dev->mem_end = pci_resource_end(pdev, 0);
8685
8686         dev->irq = pdev->irq;
8687
8688         bp->regview = pci_ioremap_bar(pdev, 0);
8689         if (!bp->regview) {
8690                 dev_err(&bp->pdev->dev,
8691                         "Cannot map register space, aborting\n");
8692                 rc = -ENOMEM;
8693                 goto err_out_release;
8694         }
8695
8696         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8697                                         min_t(u64, BNX2X_DB_SIZE(bp),
8698                                               pci_resource_len(pdev, 2)));
8699         if (!bp->doorbells) {
8700                 dev_err(&bp->pdev->dev,
8701                         "Cannot map doorbell space, aborting\n");
8702                 rc = -ENOMEM;
8703                 goto err_out_unmap;
8704         }
8705
8706         bnx2x_set_power_state(bp, PCI_D0);
8707
8708         /* clean indirect addresses */
8709         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8710                                PCICFG_VENDOR_ID_OFFSET);
8711         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8712         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8713         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8714         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
8715
8716         /* Reset the load counter */
8717         bnx2x_clear_load_cnt(bp);
8718
8719         dev->watchdog_timeo = TX_TIMEOUT;
8720
8721         dev->netdev_ops = &bnx2x_netdev_ops;
8722         bnx2x_set_ethtool_ops(dev);
8723         dev->features |= NETIF_F_SG;
8724         dev->features |= NETIF_F_HW_CSUM;
8725         if (bp->flags & USING_DAC_FLAG)
8726                 dev->features |= NETIF_F_HIGHDMA;
8727         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8728         dev->features |= NETIF_F_TSO6;
8729 #ifdef BCM_VLAN
8730         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8731         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
8732
8733         dev->vlan_features |= NETIF_F_SG;
8734         dev->vlan_features |= NETIF_F_HW_CSUM;
8735         if (bp->flags & USING_DAC_FLAG)
8736                 dev->vlan_features |= NETIF_F_HIGHDMA;
8737         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8738         dev->vlan_features |= NETIF_F_TSO6;
8739 #endif
8740
8741         /* bnx2x_get_port_hwinfo() will set prtad and mmds properly */
8742         bp->mdio.prtad = MDIO_PRTAD_NONE;
8743         bp->mdio.mmds = 0;
8744         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8745         bp->mdio.dev = dev;
8746         bp->mdio.mdio_read = bnx2x_mdio_read;
8747         bp->mdio.mdio_write = bnx2x_mdio_write;
8748
8749         return 0;
8750
8751 err_out_unmap:
8752         if (bp->regview) {
8753                 iounmap(bp->regview);
8754                 bp->regview = NULL;
8755         }
8756         if (bp->doorbells) {
8757                 iounmap(bp->doorbells);
8758                 bp->doorbells = NULL;
8759         }
8760
8761 err_out_release:
8762         if (atomic_read(&pdev->enable_cnt) == 1)
8763                 pci_release_regions(pdev);
8764
8765 err_out_disable:
8766         pci_disable_device(pdev);
8767         pci_set_drvdata(pdev, NULL);
8768
8769 err_out:
8770         return rc;
8771 }
8772
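/* Hedged note: PCICFG_LINK_CONTROL mirrors the PCIe Link Status fields, so
 * width is the negotiated lane count (x1..x16) and speed is the link-speed
 * code (1 == 2.5 GT/s, 2 == 5 GT/s on E1x; the probe path below suggests
 * E2 encodes Gen2 as 1).
 */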
8773 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8774                                                  int *width, int *speed)
8775 {
8776         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8777
8778         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8779
8780         /* return value of 1=2.5GHz 2=5GHz */
8781         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
8782 }
8783
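/* Editor's sketch of the firmware-file layout validated below (hedged):
 * the file begins with a bnx2x_fw_file_hdr, effectively an array of
 * big-endian {len, offset} section descriptors.  Every section must fit
 * inside firmware->size, every init_ops_offsets entry must index a valid
 * raw_op, and the embedded version bytes must equal the compiled-in
 * BCM_5710_FW_* values.
 */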
8784 static int bnx2x_check_firmware(struct bnx2x *bp)
8785 {
8786         const struct firmware *firmware = bp->firmware;
8787         struct bnx2x_fw_file_hdr *fw_hdr;
8788         struct bnx2x_fw_file_section *sections;
8789         u32 offset, len, num_ops;
8790         u16 *ops_offsets;
8791         int i;
8792         const u8 *fw_ver;
8793
8794         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8795                 return -EINVAL;
8796
8797         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8798         sections = (struct bnx2x_fw_file_section *)fw_hdr;
8799
8800         /* Make sure none of the offsets and sizes make us read beyond
8801          * the end of the firmware data */
8802         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8803                 offset = be32_to_cpu(sections[i].offset);
8804                 len = be32_to_cpu(sections[i].len);
8805                 if (offset + len > firmware->size) {
8806                         dev_err(&bp->pdev->dev,
8807                                 "Section %d length is out of bounds\n", i);
8808                         return -EINVAL;
8809                 }
8810         }
8811
8812         /* Likewise for the init_ops offsets */
8813         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8814         ops_offsets = (u16 *)(firmware->data + offset);
8815         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8816
8817         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8818                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
8819                         dev_err(&bp->pdev->dev,
8820                                 "Section offset %d is out of bounds\n", i);
8821                         return -EINVAL;
8822                 }
8823         }
8824
8825         /* Check FW version */
8826         offset = be32_to_cpu(fw_hdr->fw_version.offset);
8827         fw_ver = firmware->data + offset;
8828         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8829             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8830             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8831             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
8832                 dev_err(&bp->pdev->dev,
8833                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
8834                        fw_ver[0], fw_ver[1], fw_ver[2],
8835                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8836                        BCM_5710_FW_MINOR_VERSION,
8837                        BCM_5710_FW_REVISION_VERSION,
8838                        BCM_5710_FW_ENGINEERING_VERSION);
8839                 return -EINVAL;
8840         }
8841
8842         return 0;
8843 }
8844
8845 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8846 {
8847         const __be32 *source = (const __be32 *)_source;
8848         u32 *target = (u32 *)_target;
8849         u32 i;
8850
8851         for (i = 0; i < n/4; i++)
8852                 target[i] = be32_to_cpu(source[i]);
8853 }
8854
8855 /*
8856    Ops array is stored in the following format:
8857    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8858  */
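/* Hedged example: the 8-byte record 02 01 02 03 00 00 00 2a unpacks to
 * op = 0x02, offset = 0x010203, raw_data = 0x2a, since the first word is
 * read big-endian and split 8/24 as described above.
 */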
8859 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
8860 {
8861         const __be32 *source = (const __be32 *)_source;
8862         struct raw_op *target = (struct raw_op *)_target;
8863         u32 i, j, tmp;
8864
8865         for (i = 0, j = 0; i < n/8; i++, j += 2) {
8866                 tmp = be32_to_cpu(source[j]);
8867                 target[i].op = (tmp >> 24) & 0xff;
8868                 target[i].offset = tmp & 0xffffff;
8869                 target[i].raw_data = be32_to_cpu(source[j + 1]);
8870         }
8871 }
8872
8873 /**
8874  * IRO array is stored in the following format:
8875  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8876  */
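/* Hedged example: each IRO record is 12 bytes (three big-endian words):
 * word 0 is the base, word 1 splits 16/16 into m1/m2, and word 2 splits
 * 16/16 into m3/size, matching the n / sizeof(struct iro) records the
 * loop below consumes.
 */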
8877 static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8878 {
8879         const __be32 *source = (const __be32 *)_source;
8880         struct iro *target = (struct iro *)_target;
8881         u32 i, j, tmp;
8882
8883         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8884                 target[i].base = be32_to_cpu(source[j]);
8885                 j++;
8886                 tmp = be32_to_cpu(source[j]);
8887                 target[i].m1 = (tmp >> 16) & 0xffff;
8888                 target[i].m2 = tmp & 0xffff;
8889                 j++;
8890                 tmp = be32_to_cpu(source[j]);
8891                 target[i].m3 = (tmp >> 16) & 0xffff;
8892                 target[i].size = tmp & 0xffff;
8893                 j++;
8894         }
8895 }
8896
8897 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
8898 {
8899         const __be16 *source = (const __be16 *)_source;
8900         u16 *target = (u16 *)_target;
8901         u32 i;
8902
8903         for (i = 0; i < n/2; i++)
8904                 target[i] = be16_to_cpu(source[i]);
8905 }
8906
8907 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
8908 do {                                                                    \
8909         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
8910         bp->arr = kmalloc(len, GFP_KERNEL);                             \
8911         if (!bp->arr) {                                                 \
8912                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8913                 goto lbl;                                               \
8914         }                                                               \
8915         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
8916              (u8 *)bp->arr, len);                                       \
8917 } while (0)
8918
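/* Usage sketch (editor's note): the macro assumes bp and fw_hdr are in
 * scope and that func has the (src, dst, len) signature used above, e.g.
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * allocates bp->init_data and byte-swaps the blob into it.
 */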
8919 int bnx2x_init_firmware(struct bnx2x *bp)
8920 {
8921         const char *fw_file_name;
8922         struct bnx2x_fw_file_hdr *fw_hdr;
8923         int rc;
8924
8925         if (CHIP_IS_E1(bp))
8926                 fw_file_name = FW_FILE_NAME_E1;
8927         else if (CHIP_IS_E1H(bp))
8928                 fw_file_name = FW_FILE_NAME_E1H;
8929         else if (CHIP_IS_E2(bp))
8930                 fw_file_name = FW_FILE_NAME_E2;
8931         else {
8932                 BNX2X_ERR("Unsupported chip revision\n");
8933                 return -EINVAL;
8934         }
8935
8936         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
8937
8938         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
8939         if (rc) {
8940                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
8941                 goto request_firmware_exit;
8942         }
8943
8944         rc = bnx2x_check_firmware(bp);
8945         if (rc) {
8946                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
8947                 goto request_firmware_exit;
8948         }
8949
8950         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8951
8952         /* Initialize the pointers to the init arrays */
8953         /* Blob */
8954         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8955
8956         /* Opcodes */
8957         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8958
8959         /* Offsets */
8960         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8961                             be16_to_cpu_n);
8962
8963         /* STORMs firmware */
8964         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8965                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8966         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
8967                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8968         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8969                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8970         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
8971                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
8972         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8973                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8974         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
8975                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8976         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8977                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8978         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
8979                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
8980         /* IRO */
8981         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
8982
8983         return 0;
8984
8985 iro_alloc_err:
8986         kfree(bp->init_ops_offsets);
8987 init_offsets_alloc_err:
8988         kfree(bp->init_ops);
8989 init_ops_alloc_err:
8990         kfree(bp->init_data);
8991 request_firmware_exit:
8992         release_firmware(bp->firmware);
8993
8994         return rc;
8995 }
8996
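/* Hedged example: the QM is programmed in fixed-size blocks of CIDs, so
 * the L2 count (plus CNIC_CID_MAX when CNIC is built in) is rounded up to
 * a multiple of QM_CID_ROUND; for a hypothetical QM_CID_ROUND of 16,
 * roundup(17, 16) == 32.
 */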
8997 static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8998 {
8999         int cid_count = L2_FP_COUNT(l2_cid_count);
9000
9001 #ifdef BCM_CNIC
9002         cid_count += CNIC_CID_MAX;
9003 #endif
9004         return roundup(cid_count, QM_CID_ROUND);
9005 }
9006
9007 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9008                                     const struct pci_device_id *ent)
9009 {
9010         struct net_device *dev = NULL;
9011         struct bnx2x *bp;
9012         int pcie_width, pcie_speed;
9013         int rc, cid_count;
9014
9015         switch (ent->driver_data) {
9016         case BCM57710:
9017         case BCM57711:
9018         case BCM57711E:
9019                 cid_count = FP_SB_MAX_E1x;
9020                 break;
9021
9022         case BCM57712:
9023         case BCM57712E:
9024                 cid_count = FP_SB_MAX_E2;
9025                 break;
9026
9027         default:
9028                 pr_err("Unknown board_type (%ld), aborting\n",
9029                        ent->driver_data);
9030                 return -ENODEV;
9031         }
9032
9033         cid_count += CNIC_CONTEXT_USE;
9034
9035         /* dev is zeroed by alloc_etherdev_mq() */
9036         dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
9037         if (!dev) {
9038                 dev_err(&pdev->dev, "Cannot allocate net device\n");
9039                 return -ENOMEM;
9040         }
9041
9042         bp = netdev_priv(dev);
9043         bp->msg_enable = debug;
9044
9045         pci_set_drvdata(pdev, dev);
9046
9047         bp->l2_cid_count = cid_count;
9048
9049         rc = bnx2x_init_dev(pdev, dev);
9050         if (rc < 0) {
9051                 free_netdev(dev);
9052                 return rc;
9053         }
9054
9055         rc = bnx2x_init_bp(bp);
9056         if (rc)
9057                 goto init_one_exit;
9058
9059         /* calc qm_cid_count */
9060         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9061
9062         rc = register_netdev(dev);
9063         if (rc) {
9064                 dev_err(&pdev->dev, "Cannot register net device\n");
9065                 goto init_one_exit;
9066         }
9067
9068         /* Configure interrupt mode: try to enable MSI-X/MSI if
9069          * needed, set bp->num_queues appropriately.
9070          */
9071         bnx2x_set_int_mode(bp);
9072
9073         /* Add all NAPI objects */
9074         bnx2x_add_all_napi(bp);
9075
9076         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9077
9078         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9079                " IRQ %d, ", board_info[ent->driver_data].name,
9080                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
9081                pcie_width,
9082                ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9083                  (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9084                                                 "5GHz (Gen2)" : "2.5GHz",
9085                dev->base_addr, bp->pdev->irq);
9086         pr_cont("node addr %pM\n", dev->dev_addr);
9087
9088         return 0;
9089
9090 init_one_exit:
9091         if (bp->regview)
9092                 iounmap(bp->regview);
9093
9094         if (bp->doorbells)
9095                 iounmap(bp->doorbells);
9096
9097         free_netdev(dev);
9098
9099         if (atomic_read(&pdev->enable_cnt) == 1)
9100                 pci_release_regions(pdev);
9101
9102         pci_disable_device(pdev);
9103         pci_set_drvdata(pdev, NULL);
9104
9105         return rc;
9106 }
9107
9108 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9109 {
9110         struct net_device *dev = pci_get_drvdata(pdev);
9111         struct bnx2x *bp;
9112
9113         if (!dev) {
9114                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
9115                 return;
9116         }
9117         bp = netdev_priv(dev);
9118
9119         unregister_netdev(dev);
9120
9121         /* Delete all NAPI objects */
9122         bnx2x_del_all_napi(bp);
9123
9124         /* Disable MSI/MSI-X */
9125         bnx2x_disable_msi(bp);
9126
9127         /* Make sure RESET task is not scheduled before continuing */
9128         cancel_delayed_work_sync(&bp->reset_task);
9129
9130         if (bp->regview)
9131                 iounmap(bp->regview);
9132
9133         if (bp->doorbells)
9134                 iounmap(bp->doorbells);
9135
9136         bnx2x_free_mem_bp(bp);
9137
9138         free_netdev(dev);
9139
9140         if (atomic_read(&pdev->enable_cnt) == 1)
9141                 pci_release_regions(pdev);
9142
9143         pci_disable_device(pdev);
9144         pci_set_drvdata(pdev, NULL);
9145 }
9146
9147 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9148 {
9149         int i;
9150
9151         bp->state = BNX2X_STATE_ERROR;
9152
9153         bp->rx_mode = BNX2X_RX_MODE_NONE;
9154
9155         bnx2x_netif_stop(bp, 0);
9156         netif_carrier_off(bp->dev);
9157
9158         del_timer_sync(&bp->timer);
9159         bp->stats_state = STATS_STATE_DISABLED;
9160         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9161
9162         /* Release IRQs */
9163         bnx2x_free_irq(bp);
9164
9165         /* Free SKBs, SGEs, TPA pool and driver internals */
9166         bnx2x_free_skbs(bp);
9167
9168         for_each_queue(bp, i)
9169                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9170
9171         bnx2x_free_mem(bp);
9172
9173         bp->state = BNX2X_STATE_CLOSED;
9174
9175         return 0;
9176 }
9177
9178 static void bnx2x_eeh_recover(struct bnx2x *bp)
9179 {
9180         u32 val;
9181
9182         mutex_init(&bp->port.phy_mutex);
9183
9184         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9185         bp->link_params.shmem_base = bp->common.shmem_base;
9186         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9187
9188         if (!bp->common.shmem_base ||
9189             (bp->common.shmem_base < 0xA0000) ||
9190             (bp->common.shmem_base >= 0xC0000)) {
9191                 BNX2X_DEV_INFO("MCP not active\n");
9192                 bp->flags |= NO_MCP_FLAG;
9193                 return;
9194         }
9195
9196         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9197         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9198                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9199                 BNX2X_ERR("BAD MCP validity signature\n");
9200
9201         if (!BP_NOMCP(bp)) {
9202                 bp->fw_seq =
9203                     (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9204                     DRV_MSG_SEQ_NUMBER_MASK);
9205                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9206         }
9207 }
9208
9209 /**
9210  * bnx2x_io_error_detected - called when PCI error is detected
9211  * @pdev: Pointer to PCI device
9212  * @state: The current pci connection state
9213  *
9214  * This function is called after a PCI bus error affecting
9215  * this device has been detected.
9216  */
9217 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9218                                                 pci_channel_state_t state)
9219 {
9220         struct net_device *dev = pci_get_drvdata(pdev);
9221         struct bnx2x *bp = netdev_priv(dev);
9222
9223         rtnl_lock();
9224
9225         netif_device_detach(dev);
9226
9227         if (state == pci_channel_io_perm_failure) {
9228                 rtnl_unlock();
9229                 return PCI_ERS_RESULT_DISCONNECT;
9230         }
9231
9232         if (netif_running(dev))
9233                 bnx2x_eeh_nic_unload(bp);
9234
9235         pci_disable_device(pdev);
9236
9237         rtnl_unlock();
9238
9239         /* Request a slot reset */
9240         return PCI_ERS_RESULT_NEED_RESET;
9241 }
9242
9243 /**
9244  * bnx2x_io_slot_reset - called after the PCI bus has been reset
9245  * @pdev: Pointer to PCI device
9246  *
9247  * Restart the card from scratch, as if from a cold-boot.
9248  */
9249 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9250 {
9251         struct net_device *dev = pci_get_drvdata(pdev);
9252         struct bnx2x *bp = netdev_priv(dev);
9253
9254         rtnl_lock();
9255
9256         if (pci_enable_device(pdev)) {
9257                 dev_err(&pdev->dev,
9258                         "Cannot re-enable PCI device after reset\n");
9259                 rtnl_unlock();
9260                 return PCI_ERS_RESULT_DISCONNECT;
9261         }
9262
9263         pci_set_master(pdev);
9264         pci_restore_state(pdev);
9265
9266         if (netif_running(dev))
9267                 bnx2x_set_power_state(bp, PCI_D0);
9268
9269         rtnl_unlock();
9270
9271         return PCI_ERS_RESULT_RECOVERED;
9272 }
9273
9274 /**
9275  * bnx2x_io_resume - called when traffic can start flowing again
9276  * @pdev: Pointer to PCI device
9277  *
9278  * This callback is called when the error recovery driver tells us that
9279  * it's OK to resume normal operation.
9280  */
9281 static void bnx2x_io_resume(struct pci_dev *pdev)
9282 {
9283         struct net_device *dev = pci_get_drvdata(pdev);
9284         struct bnx2x *bp = netdev_priv(dev);
9285
9286         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9287                 printk(KERN_ERR "Handling parity error recovery. "
9288                                 "Try again later\n");
9289                 return;
9290         }
9291
9292         rtnl_lock();
9293
9294         bnx2x_eeh_recover(bp);
9295
9296         if (netif_running(dev))
9297                 bnx2x_nic_load(bp, LOAD_NORMAL);
9298
9299         netif_device_attach(dev);
9300
9301         rtnl_unlock();
9302 }
9303
9304 static struct pci_error_handlers bnx2x_err_handler = {
9305         .error_detected = bnx2x_io_error_detected,
9306         .slot_reset     = bnx2x_io_slot_reset,
9307         .resume         = bnx2x_io_resume,
9308 };
9309
9310 static struct pci_driver bnx2x_pci_driver = {
9311         .name        = DRV_MODULE_NAME,
9312         .id_table    = bnx2x_pci_tbl,
9313         .probe       = bnx2x_init_one,
9314         .remove      = __devexit_p(bnx2x_remove_one),
9315         .suspend     = bnx2x_suspend,
9316         .resume      = bnx2x_resume,
9317         .err_handler = &bnx2x_err_handler,
9318 };
9319
9320 static int __init bnx2x_init(void)
9321 {
9322         int ret;
9323
9324         pr_info("%s", version);
9325
9326         bnx2x_wq = create_singlethread_workqueue("bnx2x");
9327         if (bnx2x_wq == NULL) {
9328                 pr_err("Cannot create workqueue\n");
9329                 return -ENOMEM;
9330         }
9331
9332         ret = pci_register_driver(&bnx2x_pci_driver);
9333         if (ret) {
9334                 pr_err("Cannot register driver\n");
9335                 destroy_workqueue(bnx2x_wq);
9336         }
9337         return ret;
9338 }
9339
9340 static void __exit bnx2x_cleanup(void)
9341 {
9342         pci_unregister_driver(&bnx2x_pci_driver);
9343
9344         destroy_workqueue(bnx2x_wq);
9345 }
9346
9347 module_init(bnx2x_init);
9348 module_exit(bnx2x_cleanup);
9349
9350 #ifdef BCM_CNIC
9351
9352 /* count denotes the number of new completions we have seen */
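/* Flow sketch (editor's note, hedged): CNIC kernel work-queue entries
 * (KWQEs) accumulate in the cnic_kwq ring, and this function drains them
 * onto the slowpath queue (SPQ) as credits allow.  L2/COMMON entries
 * consume bp->spq_left credits, while iSCSI entries are bounded by
 * max_kwqe_pending; the count argument returns that many iSCSI credits
 * up front.
 */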
9353 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9354 {
9355         struct eth_spe *spe;
9356
9357 #ifdef BNX2X_STOP_ON_ERROR
9358         if (unlikely(bp->panic))
9359                 return;
9360 #endif
9361
9362         spin_lock_bh(&bp->spq_lock);
9363         BUG_ON(bp->cnic_spq_pending < count);
9364         bp->cnic_spq_pending -= count;
9365
9366
9367         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9368                 u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9369                                 & SPE_HDR_CONN_TYPE) >>
9370                                 SPE_HDR_CONN_TYPE_SHIFT;
9371
9372                 /* Set validation for iSCSI L2 client before sending SETUP
9373                  *  ramrod
9374                  */
9375                 if (type == ETH_CONNECTION_TYPE) {
9376                         u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9377                                              hdr.conn_and_cmd_data) >>
9378                                 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9379
9380                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9381                                 bnx2x_set_ctx_validation(&bp->context.
9382                                                 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9383                                         HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9384                 }
9385
9386                 /* There may be no more than 8 L2 and COMMON SPEs and no
9387                  * more than 8 L5 SPEs in flight.
9388                  */
9389                 if ((type == NONE_CONNECTION_TYPE) ||
9390                     (type == ETH_CONNECTION_TYPE)) {
9391                         if (!atomic_read(&bp->spq_left))
9392                                 break;
9393                         else
9394                                 atomic_dec(&bp->spq_left);
9395                 } else if (type == ISCSI_CONNECTION_TYPE) {
9396                         if (bp->cnic_spq_pending >=
9397                             bp->cnic_eth_dev.max_kwqe_pending)
9398                                 break;
9399                         else
9400                                 bp->cnic_spq_pending++;
9401                 } else {
9402                         BNX2X_ERR("Unknown SPE type: %d\n", type);
9403                         bnx2x_panic();
9404                         break;
9405                 }
9406
9407                 spe = bnx2x_sp_get_next(bp);
9408                 *spe = *bp->cnic_kwq_cons;
9409
9410                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
9411                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
9412
9413                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
9414                         bp->cnic_kwq_cons = bp->cnic_kwq;
9415                 else
9416                         bp->cnic_kwq_cons++;
9417         }
9418         bnx2x_sp_prod_update(bp);
9419         spin_unlock_bh(&bp->spq_lock);
9420 }
9421
9422 static int bnx2x_cnic_sp_queue(struct net_device *dev,
9423                                struct kwqe_16 *kwqes[], u32 count)
9424 {
9425         struct bnx2x *bp = netdev_priv(dev);
9426         int i;
9427
9428 #ifdef BNX2X_STOP_ON_ERROR
9429         if (unlikely(bp->panic))
9430                 return -EIO;
9431 #endif
9432
9433         spin_lock_bh(&bp->spq_lock);
9434
9435         for (i = 0; i < count; i++) {
9436                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
9437
9438                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
9439                         break;
9440
9441                 *bp->cnic_kwq_prod = *spe;
9442
9443                 bp->cnic_kwq_pending++;
9444
9445                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
9446                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
9447                    spe->data.update_data_addr.hi,
9448                    spe->data.update_data_addr.lo,
9449                    bp->cnic_kwq_pending);
9450
9451                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
9452                         bp->cnic_kwq_prod = bp->cnic_kwq;
9453                 else
9454                         bp->cnic_kwq_prod++;
9455         }
9456
9457         spin_unlock_bh(&bp->spq_lock);
9458
9459         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
9460                 bnx2x_cnic_sp_post(bp, 0);
9461
9462         return i;
9463 }
9464
9465 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9466 {
9467         struct cnic_ops *c_ops;
9468         int rc = 0;
9469
9470         mutex_lock(&bp->cnic_mutex);
9471         c_ops = bp->cnic_ops;
9472         if (c_ops)
9473                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9474         mutex_unlock(&bp->cnic_mutex);
9475
9476         return rc;
9477 }
9478
9479 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9480 {
9481         struct cnic_ops *c_ops;
9482         int rc = 0;
9483
9484         rcu_read_lock();
9485         c_ops = rcu_dereference(bp->cnic_ops);
9486         if (c_ops)
9487                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9488         rcu_read_unlock();
9489
9490         return rc;
9491 }
9492
9493 /*
9494  * for commands that have no data
9495  */
9496 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
9497 {
9498         struct cnic_ctl_info ctl = {0};
9499
9500         ctl.cmd = cmd;
9501
9502         return bnx2x_cnic_ctl_send(bp, &ctl);
9503 }
9504
9505 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9506 {
9507         struct cnic_ctl_info ctl;
9508
9509         /* first we tell CNIC and only then we count this as a completion */
9510         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9511         ctl.data.comp.cid = cid;
9512
9513         bnx2x_cnic_ctl_send_bh(bp, &ctl);
9514         bnx2x_cnic_sp_post(bp, 0);
9515 }
9516
9517 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9518 {
9519         struct bnx2x *bp = netdev_priv(dev);
9520         int rc = 0;
9521
9522         switch (ctl->cmd) {
9523         case DRV_CTL_CTXTBL_WR_CMD: {
9524                 u32 index = ctl->data.io.offset;
9525                 dma_addr_t addr = ctl->data.io.dma_addr;
9526
9527                 bnx2x_ilt_wr(bp, index, addr);
9528                 break;
9529         }
9530
9531         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
9532                 int count = ctl->data.credit.credit_count;
9533
9534                 bnx2x_cnic_sp_post(bp, count);
9535                 break;
9536         }
9537
9538         /* rtnl_lock is held.  */
9539         case DRV_CTL_START_L2_CMD: {
9540                 u32 cli = ctl->data.ring.client_id;
9541
9542                 /* Set iSCSI MAC address */
9543                 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9544
9545                 mmiowb();
9546                 barrier();
9547
9548                 /* Start accepting on the iSCSI L2 ring. Accept all
9549                  * multicasts, because that is the only way for the UIO
9550                  * client to receive them: in non-promiscuous mode only
9551                  * one client per function (the leading one, in our
9552                  * case) will receive multicast packets.
9553                  */
9554                 bnx2x_rxq_set_mac_filters(bp, cli,
9555                         BNX2X_ACCEPT_UNICAST |
9556                         BNX2X_ACCEPT_BROADCAST |
9557                         BNX2X_ACCEPT_ALL_MULTICAST);
9558                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9559
9560                 break;
9561         }
9562
9563         /* rtnl_lock is held.  */
9564         case DRV_CTL_STOP_L2_CMD: {
9565                 u32 cli = ctl->data.ring.client_id;
9566
9567                 /* Stop accepting on iSCSI L2 ring */
9568                 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9569                 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9570
9571                 mmiowb();
9572                 barrier();
9573
9574                 /* Unset iSCSI L2 MAC */
9575                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9576                 break;
9577         }
9578         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9579                 int count = ctl->data.credit.credit_count;
9580
9581                 smp_mb__before_atomic_inc();
9582                 atomic_add(count, &bp->spq_left);
9583                 smp_mb__after_atomic_inc();
9584                 break;
9585         }
9586
9587         default:
9588                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
9589                 rc = -EINVAL;
9590         }
9591
9592         return rc;
9593 }
9594
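/* A hedged caller-side sketch (illustrative only): how a CNIC module
 * might return slow-path queue credits through the drv_ctl hook above.
 * The function and parameter names are hypothetical; the command and
 * the drv_ctl entry point come from this file.
 */
#if 0
static void example_return_l5_spq_credit(struct cnic_eth_dev *ethdev,
                                         struct net_device *netdev,
                                         int count)
{
        struct drv_ctl_info info;

        info.cmd = DRV_CTL_RET_L5_SPQ_CREDIT_CMD;
        info.data.credit.credit_count = count;
        ethdev->drv_ctl(netdev, &info);
}
#endif

/* Describe the CNIC interrupt resources: irq_arr[0] carries the CNIC
 * status block (and its MSI-X vector when MSI-X is in use), irq_arr[1]
 * the default status block.
 */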
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        if (CHIP_IS_E2(bp))
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
        else
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;
        cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

        cp->num_irq = 2;
}

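/* Called by the CNIC module to attach to this device: allocate one page
 * for the CNIC kernel work-queue ring, fill in the IRQ information and
 * only then publish @ops with rcu_assign_pointer(), so readers never
 * see a half-initialized state.
 */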
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;
        cp->iro_arr = bp->iro_arr;

        bnx2x_setup_cnic_irq_info(bp);

        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

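/* Detach the CNIC module: drop the iSCSI MAC if it was set, clear the
 * ops pointer under cnic_mutex and wait out in-flight RCU readers
 * before freeing the work-queue ring.
 */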
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

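/* Exported entry point for the CNIC module: fill in and return the
 * cnic_eth_dev descriptor (chip identity, context-table geometry and
 * the driver callbacks defined above).
 */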
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
                             bnx2x_cid_ilt_lines(bp);
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;
        cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

        DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
                         "starting cid %d\n",
           cp->ctx_blk_size,
           cp->ctx_tbl_offset,
           cp->ctx_tbl_len,
           cp->starting_cid);
        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
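
/* A minimal consumer-side sketch (illustrative only): how the CNIC
 * module might resolve and call the probe routine exported above. The
 * function name and locals are hypothetical; symbol_get()/symbol_put()
 * and the probe signature are the only assumptions.
 */
#if 0
static struct cnic_eth_dev *example_cnic_attach(struct net_device *netdev)
{
        struct cnic_eth_dev *ethdev = NULL;
        struct cnic_eth_dev *(*probe)(struct net_device *);

        probe = symbol_get(bnx2x_cnic_probe);
        if (probe) {
                ethdev = (*probe)(netdev);
                symbol_put(bnx2x_cnic_probe);
        }
        return ethdev;
}
#endif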

#endif /* BCM_CNIC */
