/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
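
/*
 * The two helpers above tunnel GRC register accesses through the PCI
 * config space window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA), which is why
 * they work before DMAE is ready.  A minimal, illustrative
 * read-modify-write sketch (not part of the driver flow; some_grc_addr
 * and some_bit are placeholders):
 *
 *      u32 v = bnx2x_reg_rd_ind(bp, some_grc_addr);
 *      bnx2x_reg_wr_ind(bp, some_grc_addr, v | some_bit);
 *
 * Restoring the window to PCICFG_VENDOR_ID_OFFSET afterwards keeps
 * ordinary config space reads harmless.
 */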

const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
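
/*
 * Posting a DMAE command is thus two steps: the command struct is copied
 * dword by dword into the engine's command memory at
 * DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx, and then a 1 is
 * written to the per-channel GO register from dmae_reg_go_c[] to kick the
 * engine.  Completion is signalled out of band through the command's
 * comp_addr/comp_val fields, as the callers below demonstrate.
 */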

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
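
/*
 * The completion handshake above: wb_comp is zeroed under dmae_mutex, the
 * command is posted, and the engine DMAs DMAE_COMP_VAL back into wb_comp
 * (comp_addr points at it) when the transfer is done.  With cnt = 200 and
 * a 5 usec poll interval this waits on the order of a millisecond on real
 * silicon before declaring a timeout; CHIP_REV_IS_SLOW() stretches each
 * step to 100 msec for emulation/FPGA.  bnx2x_read_dmae() below mirrors
 * the same pattern in the GRC-to-PCI direction.
 */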

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
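
/*
 * Unit bookkeeping in the chunking loop above: len and dmae_wr_max count
 * 32-bit dwords (the DMAE length field), while phys_addr/addr advance in
 * bytes, hence offset grows by dmae_wr_max * 4 per chunk.  For example,
 * with a hypothetical dmae_wr_max of 0x400 dwords, a 0x500-dword write
 * is issued as one 0x400-dword chunk followed by a 0x100-dword tail
 * starting 0x1000 bytes into the buffer.
 */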

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
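
/*
 * Each of the four STORM blocks above is walked identically: every assert
 * list entry is four consecutive 32-bit words (offsets +0, +4, +8, +12),
 * the walk stops at the first entry whose first word still holds
 * COMMON_ASM_INVALID_ASSERT_OPCODE, and the return value is the number of
 * live asserts found across all STORMs.
 */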

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}
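
/*
 * The MCP keeps its firmware log in a circular text buffer in scratchpad
 * memory; the dword read back at shmem_base - 0x0800 + 4 ("mark") appears
 * to be the wrap point.  The two loops print the region from mark up to
 * shmem_base and then from the start of the buffer (addr + 4) back up to
 * mark.  Reading 8 words per iteration and NUL-terminating data[8] lets
 * each 32-byte chunk be emitted as plain text via pr_cont().
 */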

void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}
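
/*
 * HC configuration per interrupt mode, as encoded above:
 *   MSI-X: MSI/MSI-X and attention bits enabled, single-ISR and INT line
 *          disabled.
 *   MSI:   single-ISR, MSI/MSI-X and attention bits enabled, INT line
 *          disabled.
 *   INTx:  all four bits are enabled in a first write, then
 *          MSI_MSIX_INT_EN is cleared again in the final write (a
 *          two-step sequence the hardware apparently requires).
 */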

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}
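
/*
 * Teardown ordering matters here: intr_sem is bumped first so that any
 * ISR which does fire sees it and bails out (bnx2x_interrupt() checks
 * it), the HW source is optionally masked, then synchronize_irq() is
 * called on every vector (vector 0, an extra CNIC vector when BCM_CNIC
 * is set, and one per queue) so no handler is still mid-flight before
 * the slowpath work is cancelled and the workqueue drained.
 */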

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}
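
/*
 * The HW lock is a set/read register pair: writing resource_bit to
 * hw_lock_control_reg + 4 requests the bit, and reading the base
 * register back shows whether this function now owns it.  This trylock
 * variant gives up after a single attempt, unlike
 * bnx2x_acquire_hw_lock() further down, which retries for up to
 * 5 seconds.
 */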


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct bnx2x *bp = netdev_priv(dev_instance);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;
        int i;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                mask = 0x2 << fp->sb_id;
                if (status & mask) {
                        /* Handle Rx and Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                        prefetch(&fp->status_blk->u_status_block.
                                                status_block_index);
                        prefetch(fp->tx_cons_sb);
                        prefetch(&fp->status_blk->c_status_block.
                                                status_block_index);
                        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
                }
        }

#ifdef BCM_CNIC
        mask = 0x2 << CNIC_SB_ID(bp);
        if (status & (mask | 0x1)) {
                struct cnic_ops *c_ops = NULL;

                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();

                status &= ~mask;
        }
#endif

        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status))
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        return IRQ_HANDLED;
}
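
/*
 * Decoding of the acked status word above: bit 0 is the slowpath/default
 * status block (handed to sp_task and, under BCM_CNIC, also peeked at by
 * the CNIC handler), while each fastpath status block sb_id owns bit
 * (0x2 << sb_id) and simply gets its NAPI context scheduled.  Anything
 * still set after that is logged as unknown.
 */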

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 5 seconds every 5ms */
        for (cnt = 0; cnt < 1000; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, hw_lock_control_reg, resource_bit);
        return 0;
}
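
/*
 * The acquire/release pair is always used bracket-style around accesses
 * to a shared HW resource, e.g. (a sketch of the pattern used by
 * bnx2x_set_gpio() below):
 *
 *      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      ... read-modify-write MISC_REG_GPIO ...
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * Release writes resource_bit to the base control register (the clear
 * side of the set/clear pair), so only the current owner should call it.
 */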


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;
        int value;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        /* read GPIO value */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO);

        /* get the requested pin value */
        if ((gpio_reg & gpio_mask) == gpio_mask)
                value = 1;
        else
                value = 0;

        DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

        return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}
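
/*
 * MISC_REG_GPIO packs per-pin FLOAT/SET/CLR control fields at fixed bit
 * positions; the code above first masks the register down to the FLOAT
 * bits and then ORs in gpio_mask shifted to the relevant field, so e.g.
 * "output low" means clearing the pin's FLOAT bit and raising its CLR
 * bit.  The port swap test (NIG_REG_PORT_SWAP && NIG_REG_STRAP_OVERRIDE)
 * XORed with the port picks which port's half of the register is used.
 */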

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO int */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
                DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
                                   "output low\n", gpio_num, gpio_shift);
                /* clear SET and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
                DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
                                   "output high\n", gpio_num, gpio_shift);
                /* clear CLR and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
        switch (bp->link_vars.ieee_fc &
                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
                bp->port.advertising |= (ADVERTISED_Asym_Pause |
                                         ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
                bp->port.advertising |= ADVERTISED_Asym_Pause;
                break;

        default:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        }
}
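
/*
 * Mapping from the advertised IEEE pause bits to the ethtool advertising
 * mask, as implemented above:
 *
 *      PAUSE_NONE       -> advertise neither Pause nor Asym_Pause
 *      PAUSE_BOTH       -> advertise Pause and Asym_Pause
 *      PAUSE_ASYMMETRIC -> advertise Asym_Pause only
 *
 * with the default case falling back to advertising no pause at all.
 */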


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
        if (!BP_NOMCP(bp)) {
                u8 rc;

                /* Initialize link parameters structure variables */
                /* It is recommended to turn off RX FC for jumbo frames
                   for better performance */
                if (bp->dev->mtu > 5000)
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
                else
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

                bnx2x_acquire_phy_lock(bp);

                if (load_mode == LOAD_DIAG)
                        bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);

                if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
                        bnx2x_link_report(bp);
                }

                return rc;
        }
        BNX2X_ERR("Bootcode is missing - can not initialize link\n");
        return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
        u8 rc = 0;

        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not test link\n");

        return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
        u32 r_param = bp->link_vars.line_speed / 8;
        u32 fair_periodic_timeout_usec;
        u32 t_fair;

        memset(&(bp->cmng.rs_vars), 0,
               sizeof(struct rate_shaping_vars_per_port));
        memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

        /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
        bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

        /* this is the threshold below which no timer arming will occur;
           the 1.25 coefficient makes the threshold a little bigger than
           the real time, to compensate for timer inaccuracy */
        bp->cmng.rs_vars.rs_threshold =
                                (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

        /* resolution of fairness timer */
        fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
        /* for 10G it is 1000usec. for 1G it is 10000usec. */
        t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

        /* this is the threshold below which we won't arm the timer anymore */
        bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

        /* we multiply by 1e3/8 to get bytes/msec.
           We don't want the credits to exceed
           t_fair*FAIR_MEM (the algorithm resolution) */
        bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
        /* since each tick is 4 usec */
        bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
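
/*
 * Worked units for the 10G case: line_speed is in Mbps, so r_param =
 * 10000 / 8 = 1250 bytes per usec.  The SDM timer ticks every 4 usec,
 * which is why both rs_periodic_timeout and fairness_timeout divide a
 * usec value by 4, and t_fair = T_FAIR_COEF / line_speed matches the
 * comment above (10x longer at 1G than at 10G).
 */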

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zero will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
        int all_zero = 1;
        int port = BP_PORT(bp);
        int vn;

        bp->vn_weight_sum = 0;
        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
                int func = 2*vn + port;
                u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

                /* Skip hidden vns */
                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
                        continue;

                /* If min rate is zero - set it to 1 */
                if (!vn_min_rate)
                        vn_min_rate = DEF_MIN_RATE;
                else
                        all_zero = 0;

                bp->vn_weight_sum += vn_min_rate;
        }

        /* ... only if all min rates are zeros - disable fairness */
        if (all_zero) {
                bp->cmng.flags.cmng_enables &=
                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
                DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
                   "  fairness will be disabled\n");
        } else
                bp->cmng.flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
        struct rate_shaping_vars_per_vn m_rs_vn;
        struct fairness_vars_per_vn m_fair_vn;
        u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
        u16 vn_min_rate, vn_max_rate;
        int i;

        /* If function is hidden - set min and max to zeroes */
        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
                vn_min_rate = 0;
                vn_max_rate = 0;

        } else {
                vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
                /* If min rate is zero - set it to 1 */
                if (!vn_min_rate)
                        vn_min_rate = DEF_MIN_RATE;
                vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                                FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
        }
        DP(NETIF_MSG_IFUP,
           "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
           func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

        memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
        memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

        /* global vn counter - maximal Mbps for this vn */
        m_rs_vn.vn_counter.rate = vn_max_rate;

        /* quota - number of bytes transmitted in this period */
        m_rs_vn.vn_counter.quota =
                                (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

        if (bp->vn_weight_sum) {
1446                 /* credit for each period of the fairness algorithm:
1447                    number of bytes in T_FAIR (the vn's share of the port rate).
1448                    vn_weight_sum should not be larger than 10000, thus
1449                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1450                    than zero */
1451                 m_fair_vn.vn_credit_delta =
1452                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1453                                                    (8 * bp->vn_weight_sum))),
1454                               (bp->cmng.fair_vars.fair_threshold * 2));
1455                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1456                    m_fair_vn.vn_credit_delta);
1457         }
1458
1459         /* Store it to internal memory */
1460         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1461                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1462                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1463                        ((u32 *)(&m_rs_vn))[i]);
1464
1465         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1466                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1467                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1468                        ((u32 *)(&m_fair_vn))[i]);
1469 }
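
/*
 * Illustrative arithmetic (not part of the driver) for vn_credit_delta
 * above.  The per-period credit is the vn's byte share of T_FAIR,
 * floored at twice the fairness threshold:
 *
 *   credit = max(vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum)),
 *                2 * fair_threshold)
 *
 * Because vn_weight_sum never exceeds 10000, the inner division
 * T_FAIR_COEF / (8 * vn_weight_sum) cannot truncate to zero, so the
 * rate-based operand stays meaningful.  Concrete values depend on the
 * board configuration and would be hypothetical here.
 */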
1470
1471
1472 /* This function is called upon link interrupt */
1473 static void bnx2x_link_attn(struct bnx2x *bp)
1474 {
1475         u32 prev_link_status = bp->link_vars.link_status;
1476         /* Make sure that we are synced with the current statistics */
1477         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1478
1479         bnx2x_link_update(&bp->link_params, &bp->link_vars);
1480
1481         if (bp->link_vars.link_up) {
1482
1483                 /* dropless flow control */
1484                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1485                         int port = BP_PORT(bp);
1486                         u32 pause_enabled = 0;
1487
1488                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1489                                 pause_enabled = 1;
1490
1491                         REG_WR(bp, BAR_USTRORM_INTMEM +
1492                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1493                                pause_enabled);
1494                 }
1495
1496                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1497                         struct host_port_stats *pstats;
1498
1499                         pstats = bnx2x_sp(bp, port_stats);
1500                         /* reset old bmac stats */
1501                         memset(&(pstats->mac_stx[0]), 0,
1502                                sizeof(struct mac_stx));
1503                 }
1504                 if (bp->state == BNX2X_STATE_OPEN)
1505                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1506         }
1507
1508         /* indicate link status only if link status actually changed */
1509         if (prev_link_status != bp->link_vars.link_status)
1510                 bnx2x_link_report(bp);
1511
1512         if (IS_E1HMF(bp)) {
1513                 int port = BP_PORT(bp);
1514                 int func;
1515                 int vn;
1516
1517                 /* Set the attention towards other drivers on the same port */
1518                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1519                         if (vn == BP_E1HVN(bp))
1520                                 continue;
1521
1522                         func = ((vn << 1) | port);
1523                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1524                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1525                 }
1526
1527                 if (bp->link_vars.link_up) {
1528                         int i;
1529
1530                         /* Init rate shaping and fairness contexts */
1531                         bnx2x_init_port_minmax(bp);
1532
1533                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1534                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1535
1536                         /* Store it to internal memory */
1537                         for (i = 0;
1538                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
1539                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1540                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1541                                        ((u32 *)(&bp->cmng))[i]);
1542                 }
1543         }
1544 }
1545
1546 void bnx2x__link_status_update(struct bnx2x *bp)
1547 {
1548         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1549                 return;
1550
1551         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1552
1553         if (bp->link_vars.link_up)
1554                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1555         else
1556                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1557
1558         bnx2x_calc_vn_weight_sum(bp);
1559
1560         /* indicate link status */
1561         bnx2x_link_report(bp);
1562 }
1563
1564 static void bnx2x_pmf_update(struct bnx2x *bp)
1565 {
1566         int port = BP_PORT(bp);
1567         u32 val;
1568
1569         bp->port.pmf = 1;
1570         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1571
1572         /* enable nig attention */
1573         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1574         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1575         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1576
1577         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1578 }
1579
1580 /* end of Link */
1581
1582 /* slow path */
1583
1584 /*
1585  * General service functions
1586  */
1587
1588 /* send the MCP a request, block until there is a reply */
1589 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1590 {
1591         int func = BP_FUNC(bp);
1592         u32 seq = ++bp->fw_seq;
1593         u32 rc = 0;
1594         u32 cnt = 1;
1595         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1596
1597         mutex_lock(&bp->fw_mb_mutex);
1598         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1599         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1600
1601         do {
1602                 /* let the FW do its magic ... */
1603                 msleep(delay);
1604
1605                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1606
1607                 /* Give the FW up to 5 seconds (500*10ms) */
1608         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1609
1610         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1611            cnt*delay, rc, seq);
1612
1613         /* is this a reply to our command? */
1614         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1615                 rc &= FW_MSG_CODE_MASK;
1616         else {
1617                 /* FW BUG! */
1618                 BNX2X_ERR("FW failed to respond!\n");
1619                 bnx2x_fw_dump(bp);
1620                 rc = 0;
1621         }
1622         mutex_unlock(&bp->fw_mb_mutex);
1623
1624         return rc;
1625 }
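
/*
 * Illustrative sketch (not part of the driver) of the sequence-matched
 * polling used by bnx2x_fw_command() above: a reply is accepted only
 * when the FW echoes back the sequence number that tagged the request.
 * The read_reply callback and the retry budget are hypothetical.
 */
static u32 example_mb_poll(u32 seq, u32 (*read_reply)(void), int max_tries)
{
        int cnt;

        for (cnt = 0; cnt < max_tries; cnt++) {
                u32 rc = read_reply();

                /* a reply is ours only if the sequence matches */
                if ((rc & FW_MSG_SEQ_NUMBER_MASK) == seq)
                        return rc & FW_MSG_CODE_MASK;
        }
        return 0;       /* FW failed to respond in time */
}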
1626
1627 static void bnx2x_e1h_disable(struct bnx2x *bp)
1628 {
1629         int port = BP_PORT(bp);
1630
1631         netif_tx_disable(bp->dev);
1632
1633         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1634
1635         netif_carrier_off(bp->dev);
1636 }
1637
1638 static void bnx2x_e1h_enable(struct bnx2x *bp)
1639 {
1640         int port = BP_PORT(bp);
1641
1642         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1643
1644         /* Tx queues only need to be re-enabled */
1645         netif_tx_wake_all_queues(bp->dev);
1646
1647         /*
1648          * Do not call netif_carrier_on() here; the link state check will
1649          * call it if the link is up
1650          */
1651 }
1652
1653 static void bnx2x_update_min_max(struct bnx2x *bp)
1654 {
1655         int port = BP_PORT(bp);
1656         int vn, i;
1657
1658         /* Init rate shaping and fairness contexts */
1659         bnx2x_init_port_minmax(bp);
1660
1661         bnx2x_calc_vn_weight_sum(bp);
1662
1663         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1664                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1665
1666         if (bp->port.pmf) {
1667                 int func;
1668
1669                 /* Set the attention towards other drivers on the same port */
1670                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1671                         if (vn == BP_E1HVN(bp))
1672                                 continue;
1673
1674                         func = ((vn << 1) | port);
1675                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1676                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1677                 }
1678
1679                 /* Store it to internal memory */
1680                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1681                         REG_WR(bp, BAR_XSTRORM_INTMEM +
1682                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1683                                ((u32 *)(&bp->cmng))[i]);
1684         }
1685 }
1686
1687 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1688 {
1689         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1690
1691         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1692
1693                 /*
1694                  * This is the only place besides the function initialization
1695                  * where the bp->flags can change so it is done without any
1696                  * locks
1697                  */
1698                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1699                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1700                         bp->flags |= MF_FUNC_DIS;
1701
1702                         bnx2x_e1h_disable(bp);
1703                 } else {
1704                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1705                         bp->flags &= ~MF_FUNC_DIS;
1706
1707                         bnx2x_e1h_enable(bp);
1708                 }
1709                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1710         }
1711         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1712
1713                 bnx2x_update_min_max(bp);
1714                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1715         }
1716
1717         /* Report results to MCP */
1718         if (dcc_event)
1719                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1720         else
1721                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1722 }
1723
1724 /* must be called under the spq lock */
1725 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1726 {
1727         struct eth_spe *next_spe = bp->spq_prod_bd;
1728
1729         if (bp->spq_prod_bd == bp->spq_last_bd) {
1730                 bp->spq_prod_bd = bp->spq;
1731                 bp->spq_prod_idx = 0;
1732                 DP(NETIF_MSG_TIMER, "end of spq\n");
1733         } else {
1734                 bp->spq_prod_bd++;
1735                 bp->spq_prod_idx++;
1736         }
1737         return next_spe;
1738 }
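
/*
 * Illustrative sketch (not part of the driver): the producer-wrap
 * pattern of bnx2x_sp_get_next() above, on a generic array-backed
 * ring.  The slot handed back is the one *before* the producer
 * advances.  The structure and helper names are hypothetical.
 */
struct example_spq_ring {
        struct eth_spe *base;   /* first element of the ring */
        struct eth_spe *last;   /* last element of the ring */
        struct eth_spe *prod;   /* next free slot */
        u16 prod_idx;
};

static inline struct eth_spe *example_spq_next(struct example_spq_ring *r)
{
        struct eth_spe *spe = r->prod;

        if (r->prod == r->last) {
                /* wrap back to the start of the ring */
                r->prod = r->base;
                r->prod_idx = 0;
        } else {
                r->prod++;
                r->prod_idx++;
        }
        return spe;
}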
1739
1740 /* must be called under the spq lock */
1741 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1742 {
1743         int func = BP_FUNC(bp);
1744
1745         /* Make sure that BD data is updated before writing the producer */
1746         wmb();
1747
1748         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1749                bp->spq_prod_idx);
1750         mmiowb();
1751 }
1752
1753 /* the slow path queue is odd since completions arrive on the fastpath ring */
1754 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1755                          u32 data_hi, u32 data_lo, int common)
1756 {
1757         struct eth_spe *spe;
1758
1759 #ifdef BNX2X_STOP_ON_ERROR
1760         if (unlikely(bp->panic))
1761                 return -EIO;
1762 #endif
1763
1764         spin_lock_bh(&bp->spq_lock);
1765
1766         if (!bp->spq_left) {
1767                 BNX2X_ERR("BUG! SPQ ring full!\n");
1768                 spin_unlock_bh(&bp->spq_lock);
1769                 bnx2x_panic();
1770                 return -EBUSY;
1771         }
1772
1773         spe = bnx2x_sp_get_next(bp);
1774
1775         /* CID needs the port number to be encoded in it */
1776         spe->hdr.conn_and_cmd_data =
1777                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1778                                     HW_CID(bp, cid));
1779         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1780         if (common)
1781                 spe->hdr.type |=
1782                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1783
1784         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1785         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1786
1787         bp->spq_left--;
1788
1789         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1790            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
1791            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1792            (u32)(U64_LO(bp->spq_mapping) +
1793            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1794            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1795
1796         bnx2x_sp_prod_update(bp);
1797         spin_unlock_bh(&bp->spq_lock);
1798         return 0;
1799 }
1800
1801 /* acquire split MCP access lock register */
1802 static int bnx2x_acquire_alr(struct bnx2x *bp)
1803 {
1804         u32 j, val;
1805         int rc = 0;
1806
1807         might_sleep();
1808         for (j = 0; j < 1000; j++) {
1809                 val = (1UL << 31);
1810                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1811                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1812                 if (val & (1L << 31))
1813                         break;
1814
1815                 msleep(5);
1816         }
1817         if (!(val & (1L << 31))) {
1818                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1819                 rc = -EBUSY;
1820         }
1821
1822         return rc;
1823 }
1824
1825 /* release split MCP access lock register */
1826 static void bnx2x_release_alr(struct bnx2x *bp)
1827 {
1828         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1829 }
1830
1831 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1832 {
1833         struct host_def_status_block *def_sb = bp->def_status_blk;
1834         u16 rc = 0;
1835
1836         barrier(); /* status block is written to by the chip */
1837         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1838                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1839                 rc |= 1;
1840         }
1841         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1842                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1843                 rc |= 2;
1844         }
1845         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1846                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1847                 rc |= 4;
1848         }
1849         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1850                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1851                 rc |= 8;
1852         }
1853         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1854                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1855                 rc |= 16;
1856         }
1857         return rc;
1858 }
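
/*
 * Illustrative sketch (not part of the driver): the change-detection
 * pattern of bnx2x_update_dsb_idx() above.  Every cached index that
 * differs from the chip-written copy sets one bit in the returned
 * mask, so the caller services only the storms that actually advanced.
 * The helper name and arguments are hypothetical.
 */
static inline u16 example_update_indices(u16 *cached, const u16 *hw, int n)
{
        u16 mask = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (cached[i] != hw[i]) {
                        cached[i] = hw[i];
                        mask |= 1 << i; /* bit i: index i changed */
                }
        }
        return mask;
}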
1859
1860 /*
1861  * slow path service functions
1862  */
1863
1864 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1865 {
1866         int port = BP_PORT(bp);
1867         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1868                        COMMAND_REG_ATTN_BITS_SET);
1869         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1870                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
1871         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1872                                        NIG_REG_MASK_INTERRUPT_PORT0;
1873         u32 aeu_mask;
1874         u32 nig_mask = 0;
1875
1876         if (bp->attn_state & asserted)
1877                 BNX2X_ERR("IGU ERROR\n");
1878
1879         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1880         aeu_mask = REG_RD(bp, aeu_addr);
1881
1882         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
1883            aeu_mask, asserted);
1884         aeu_mask &= ~(asserted & 0x3ff);
1885         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1886
1887         REG_WR(bp, aeu_addr, aeu_mask);
1888         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1889
1890         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1891         bp->attn_state |= asserted;
1892         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1893
1894         if (asserted & ATTN_HARD_WIRED_MASK) {
1895                 if (asserted & ATTN_NIG_FOR_FUNC) {
1896
1897                         bnx2x_acquire_phy_lock(bp);
1898
1899                         /* save nig interrupt mask */
1900                         nig_mask = REG_RD(bp, nig_int_mask_addr);
1901                         REG_WR(bp, nig_int_mask_addr, 0);
1902
1903                         bnx2x_link_attn(bp);
1904
1905                         /* handle unicore attn? */
1906                 }
1907                 if (asserted & ATTN_SW_TIMER_4_FUNC)
1908                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1909
1910                 if (asserted & GPIO_2_FUNC)
1911                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1912
1913                 if (asserted & GPIO_3_FUNC)
1914                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1915
1916                 if (asserted & GPIO_4_FUNC)
1917                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1918
1919                 if (port == 0) {
1920                         if (asserted & ATTN_GENERAL_ATTN_1) {
1921                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1922                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1923                         }
1924                         if (asserted & ATTN_GENERAL_ATTN_2) {
1925                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1926                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1927                         }
1928                         if (asserted & ATTN_GENERAL_ATTN_3) {
1929                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1930                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1931                         }
1932                 } else {
1933                         if (asserted & ATTN_GENERAL_ATTN_4) {
1934                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1935                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1936                         }
1937                         if (asserted & ATTN_GENERAL_ATTN_5) {
1938                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1939                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1940                         }
1941                         if (asserted & ATTN_GENERAL_ATTN_6) {
1942                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1943                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1944                         }
1945                 }
1946
1947         } /* if hardwired */
1948
1949         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1950            asserted, hc_addr);
1951         REG_WR(bp, hc_addr, asserted);
1952
1953         /* now set back the mask */
1954         if (asserted & ATTN_NIG_FOR_FUNC) {
1955                 REG_WR(bp, nig_int_mask_addr, nig_mask);
1956                 bnx2x_release_phy_lock(bp);
1957         }
1958 }
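
/*
 * Illustrative note (not part of the driver): the AEU mask update
 * above and its counterpart in the deassert path below are symmetric
 * over the low ten attention lines:
 *
 *   assert:   aeu_mask &= ~(asserted   & 0x3ff);   mask them off
 *   deassert: aeu_mask |=  (deasserted & 0x3ff);   restore them
 *
 * e.g. with a hypothetical aeu_mask of 0x3ff and asserted = 0x005,
 * the assert path leaves 0x3fa; once those lines deassert, the mask
 * returns to 0x3ff.
 */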
1959
1960 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1961 {
1962         int port = BP_PORT(bp);
1963
1964         /* mark the failure */
1965         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1966         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1967         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1968                  bp->link_params.ext_phy_config);
1969
1970         /* log the failure */
1971         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1972                " the driver to shut down the card to prevent permanent"
1973                " damage.  Please contact OEM Support for assistance\n");
1974 }
1975
1976 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1977 {
1978         int port = BP_PORT(bp);
1979         int reg_offset;
1980         u32 val, swap_val, swap_override;
1981
1982         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1983                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1984
1985         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1986
1987                 val = REG_RD(bp, reg_offset);
1988                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1989                 REG_WR(bp, reg_offset, val);
1990
1991                 BNX2X_ERR("SPIO5 hw attention\n");
1992
1993                 /* Fan failure attention */
1994                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1995                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1996                         /* Low power mode is controlled by GPIO 2 */
1997                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1998                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1999                         /* The PHY reset is controlled by GPIO 1 */
2000                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2001                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2002                         break;
2003
2004                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2005                         /* The PHY reset is controlled by GPIO 1 */
2006                         /* fake the port number to cancel the swap done in
2007                            set_gpio() */
2008                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2009                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2010                         port = (swap_val && swap_override) ^ 1;
2011                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2012                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2013                         break;
2014
2015                 default:
2016                         break;
2017                 }
2018                 bnx2x_fan_failure(bp);
2019         }
2020
2021         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2022                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2023                 bnx2x_acquire_phy_lock(bp);
2024                 bnx2x_handle_module_detect_int(&bp->link_params);
2025                 bnx2x_release_phy_lock(bp);
2026         }
2027
2028         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2029
2030                 val = REG_RD(bp, reg_offset);
2031                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2032                 REG_WR(bp, reg_offset, val);
2033
2034                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2035                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2036                 bnx2x_panic();
2037         }
2038 }
2039
2040 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2041 {
2042         u32 val;
2043
2044         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2045
2046                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2047                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2048                 /* DORQ discard attention */
2049                 if (val & 0x2)
2050                         BNX2X_ERR("FATAL error from DORQ\n");
2051         }
2052
2053         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2054
2055                 int port = BP_PORT(bp);
2056                 int reg_offset;
2057
2058                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2059                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2060
2061                 val = REG_RD(bp, reg_offset);
2062                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2063                 REG_WR(bp, reg_offset, val);
2064
2065                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2066                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2067                 bnx2x_panic();
2068         }
2069 }
2070
2071 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2072 {
2073         u32 val;
2074
2075         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2076
2077                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2078                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2079                 /* CFC error attention */
2080                 if (val & 0x2)
2081                         BNX2X_ERR("FATAL error from CFC\n");
2082         }
2083
2084         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2085
2086                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2087                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2088                 /* RQ_USDMDP_FIFO_OVERFLOW */
2089                 if (val & 0x18000)
2090                         BNX2X_ERR("FATAL error from PXP\n");
2091         }
2092
2093         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2094
2095                 int port = BP_PORT(bp);
2096                 int reg_offset;
2097
2098                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2099                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2100
2101                 val = REG_RD(bp, reg_offset);
2102                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2103                 REG_WR(bp, reg_offset, val);
2104
2105                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2106                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2107                 bnx2x_panic();
2108         }
2109 }
2110
2111 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2112 {
2113         u32 val;
2114
2115         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2116
2117                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2118                         int func = BP_FUNC(bp);
2119
2120                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2121                         bp->mf_config = SHMEM_RD(bp,
2122                                            mf_cfg.func_mf_config[func].config);
2123                         val = SHMEM_RD(bp, func_mb[func].drv_status);
2124                         if (val & DRV_STATUS_DCC_EVENT_MASK)
2125                                 bnx2x_dcc_event(bp,
2126                                             (val & DRV_STATUS_DCC_EVENT_MASK));
2127                         bnx2x__link_status_update(bp);
2128                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2129                                 bnx2x_pmf_update(bp);
2130
2131                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2132
2133                         BNX2X_ERR("MC assert!\n");
2134                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2135                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2136                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2137                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2138                         bnx2x_panic();
2139
2140                 } else if (attn & BNX2X_MCP_ASSERT) {
2141
2142                         BNX2X_ERR("MCP assert!\n");
2143                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2144                         bnx2x_fw_dump(bp);
2145
2146                 } else
2147                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2148         }
2149
2150         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2151                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2152                 if (attn & BNX2X_GRC_TIMEOUT) {
2153                         val = CHIP_IS_E1H(bp) ?
2154                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2155                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2156                 }
2157                 if (attn & BNX2X_GRC_RSV) {
2158                         val = CHIP_IS_E1H(bp) ?
2159                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2160                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2161                 }
2162                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2163         }
2164 }
2165
2166 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
2167 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
2168 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2169 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
2170 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
2171 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
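
/*
 * Illustrative layout (not part of the driver) of BNX2X_MISC_GEN_REG
 * as used by the helpers below:
 *
 *   bits [15:0] - load counter (LOAD_COUNTER_MASK)
 *   bit  [16]   - reset-in-progress flag (1 << RESET_DONE_FLAG_SHIFT)
 *
 * e.g. a hypothetical raw value of 0x00010002 would mean: reset in
 * progress with two functions loaded.
 */
static inline int example_reset_in_progress(u32 reg)
{
        /* any bit above the counter marks an unfinished reset */
        return !!(reg & RESET_DONE_FLAG_MASK);
}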
2172 /*
2173  * should be run under rtnl lock
2174  */
2175 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2176 {
2177         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2178         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2179         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2180         barrier();
2181         mmiowb();
2182 }
2183
2184 /*
2185  * should be run under rtnl lock
2186  */
2187 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2188 {
2189         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2190         val |= (1 << RESET_DONE_FLAG_SHIFT);
2191         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2192         barrier();
2193         mmiowb();
2194 }
2195
2196 /*
2197  * should be run under rtnl lock
2198  */
2199 bool bnx2x_reset_is_done(struct bnx2x *bp)
2200 {
2201         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2202         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2203         return (val & RESET_DONE_FLAG_MASK) ? false : true;
2204 }
2205
2206 /*
2207  * should be run under rtnl lock
2208  */
2209 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2210 {
2211         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2212
2213         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2214
2215         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2216         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2217         barrier();
2218         mmiowb();
2219 }
2220
2221 /*
2222  * should be run under rtnl lock
2223  */
2224 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2225 {
2226         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2227
2228         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2229
2230         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2231         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2232         barrier();
2233         mmiowb();
2234
2235         return val1;
2236 }
2237
2238 /*
2239  * should be run under rtnl lock
2240  */
2241 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2242 {
2243         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2244 }
2245
2246 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2247 {
2248         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2249         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2250 }
2251
2252 static inline void _print_next_block(int idx, const char *blk)
2253 {
2254         if (idx)
2255                 pr_cont(", ");
2256         pr_cont("%s", blk);
2257 }
2258
2259 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2260 {
2261         int i = 0;
2262         u32 cur_bit = 0;
2263         for (i = 0; sig; i++) {
2264                 cur_bit = ((u32)0x1 << i);
2265                 if (sig & cur_bit) {
2266                         switch (cur_bit) {
2267                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2268                                 _print_next_block(par_num++, "BRB");
2269                                 break;
2270                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2271                                 _print_next_block(par_num++, "PARSER");
2272                                 break;
2273                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2274                                 _print_next_block(par_num++, "TSDM");
2275                                 break;
2276                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2277                                 _print_next_block(par_num++, "SEARCHER");
2278                                 break;
2279                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2280                                 _print_next_block(par_num++, "TSEMI");
2281                                 break;
2282                         }
2283
2284                         /* Clear the bit */
2285                         sig &= ~cur_bit;
2286                 }
2287         }
2288
2289         return par_num;
2290 }
2291
2292 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2293 {
2294         int i = 0;
2295         u32 cur_bit = 0;
2296         for (i = 0; sig; i++) {
2297                 cur_bit = ((u32)0x1 << i);
2298                 if (sig & cur_bit) {
2299                         switch (cur_bit) {
2300                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2301                                 _print_next_block(par_num++, "PBCLIENT");
2302                                 break;
2303                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2304                                 _print_next_block(par_num++, "QM");
2305                                 break;
2306                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2307                                 _print_next_block(par_num++, "XSDM");
2308                                 break;
2309                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2310                                 _print_next_block(par_num++, "XSEMI");
2311                                 break;
2312                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2313                                 _print_next_block(par_num++, "DOORBELLQ");
2314                                 break;
2315                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2316                                 _print_next_block(par_num++, "VAUX PCI CORE");
2317                                 break;
2318                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2319                                 _print_next_block(par_num++, "DEBUG");
2320                                 break;
2321                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2322                                 _print_next_block(par_num++, "USDM");
2323                                 break;
2324                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2325                                 _print_next_block(par_num++, "USEMI");
2326                                 break;
2327                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2328                                 _print_next_block(par_num++, "UPB");
2329                                 break;
2330                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2331                                 _print_next_block(par_num++, "CSDM");
2332                                 break;
2333                         }
2334
2335                         /* Clear the bit */
2336                         sig &= ~cur_bit;
2337                 }
2338         }
2339
2340         return par_num;
2341 }
2342
2343 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2344 {
2345         int i = 0;
2346         u32 cur_bit = 0;
2347         for (i = 0; sig; i++) {
2348                 cur_bit = ((u32)0x1 << i);
2349                 if (sig & cur_bit) {
2350                         switch (cur_bit) {
2351                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2352                                 _print_next_block(par_num++, "CSEMI");
2353                                 break;
2354                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2355                                 _print_next_block(par_num++, "PXP");
2356                                 break;
2357                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2358                                 _print_next_block(par_num++,
2359                                         "PXPPCICLOCKCLIENT");
2360                                 break;
2361                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2362                                 _print_next_block(par_num++, "CFC");
2363                                 break;
2364                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2365                                 _print_next_block(par_num++, "CDU");
2366                                 break;
2367                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2368                                 _print_next_block(par_num++, "IGU");
2369                                 break;
2370                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2371                                 _print_next_block(par_num++, "MISC");
2372                                 break;
2373                         }
2374
2375                         /* Clear the bit */
2376                         sig &= ~cur_bit;
2377                 }
2378         }
2379
2380         return par_num;
2381 }
2382
2383 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2384 {
2385         int i = 0;
2386         u32 cur_bit = 0;
2387         for (i = 0; sig; i++) {
2388                 cur_bit = ((u32)0x1 << i);
2389                 if (sig & cur_bit) {
2390                         switch (cur_bit) {
2391                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2392                                 _print_next_block(par_num++, "MCP ROM");
2393                                 break;
2394                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2395                                 _print_next_block(par_num++, "MCP UMP RX");
2396                                 break;
2397                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2398                                 _print_next_block(par_num++, "MCP UMP TX");
2399                                 break;
2400                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2401                                 _print_next_block(par_num++, "MCP SCPAD");
2402                                 break;
2403                         }
2404
2405                         /* Clear the bit */
2406                         sig &= ~cur_bit;
2407                 }
2408         }
2409
2410         return par_num;
2411 }
2412
2413 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2414                                      u32 sig2, u32 sig3)
2415 {
2416         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2417             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2418                 int par_num = 0;
2419                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
2420                         "[0]:0x%08x [1]:0x%08x "
2421                         "[2]:0x%08x [3]:0x%08x\n",
2422                           sig0 & HW_PRTY_ASSERT_SET_0,
2423                           sig1 & HW_PRTY_ASSERT_SET_1,
2424                           sig2 & HW_PRTY_ASSERT_SET_2,
2425                           sig3 & HW_PRTY_ASSERT_SET_3);
2426                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
2427                        bp->dev->name);
2428                 par_num = bnx2x_print_blocks_with_parity0(
2429                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2430                 par_num = bnx2x_print_blocks_with_parity1(
2431                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2432                 par_num = bnx2x_print_blocks_with_parity2(
2433                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2434                 par_num = bnx2x_print_blocks_with_parity3(
2435                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2436                 printk("\n");
2437                 return true;
2438         } else
2439                 return false;
2440 }
2441
2442 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2443 {
2444         struct attn_route attn;
2445         int port = BP_PORT(bp);
2446
2447         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2448         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2449         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2450         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2459
2460         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2461                                         attn.sig[3]);
2462 }
2463
2464 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2465 {
2466         struct attn_route attn, *group_mask;
2467         int port = BP_PORT(bp);
2468         int index;
2469         u32 reg_addr;
2470         u32 val;
2471         u32 aeu_mask;
2472
2473         /* need to take the HW lock because the MCP or the other port
2474            might also try to handle this event */
2475         bnx2x_acquire_alr(bp);
2476
2477         if (bnx2x_chk_parity_attn(bp)) {
2478                 bp->recovery_state = BNX2X_RECOVERY_INIT;
2479                 bnx2x_set_reset_in_progress(bp);
2480                 schedule_delayed_work(&bp->reset_task, 0);
2481                 /* Disable HW interrupts */
2482                 bnx2x_int_disable(bp);
2483                 bnx2x_release_alr(bp);
2484                 /* In case of parity errors don't handle attentions so that
2485                  * the other function would also "see" the parity errors.
2486                  */
2487                 return;
2488         }
2489
2490         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2491         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2492         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2493         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2494         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2495            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2496
2497         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2498                 if (deasserted & (1 << index)) {
2499                         group_mask = &bp->attn_group[index];
2500
2501                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2502                            index, group_mask->sig[0], group_mask->sig[1],
2503                            group_mask->sig[2], group_mask->sig[3]);
2504
2505                         bnx2x_attn_int_deasserted3(bp,
2506                                         attn.sig[3] & group_mask->sig[3]);
2507                         bnx2x_attn_int_deasserted1(bp,
2508                                         attn.sig[1] & group_mask->sig[1]);
2509                         bnx2x_attn_int_deasserted2(bp,
2510                                         attn.sig[2] & group_mask->sig[2]);
2511                         bnx2x_attn_int_deasserted0(bp,
2512                                         attn.sig[0] & group_mask->sig[0]);
2513                 }
2514         }
2515
2516         bnx2x_release_alr(bp);
2517
2518         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2519
2520         val = ~deasserted;
2521         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2522            val, reg_addr);
2523         REG_WR(bp, reg_addr, val);
2524
2525         if (~bp->attn_state & deasserted)
2526                 BNX2X_ERR("IGU ERROR\n");
2527
2528         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2529                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2530
2531         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2532         aeu_mask = REG_RD(bp, reg_addr);
2533
2534         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2535            aeu_mask, deasserted);
2536         aeu_mask |= (deasserted & 0x3ff);
2537         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2538
2539         REG_WR(bp, reg_addr, aeu_mask);
2540         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2541
2542         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2543         bp->attn_state &= ~deasserted;
2544         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2545 }
2546
2547 static void bnx2x_attn_int(struct bnx2x *bp)
2548 {
2549         /* read local copy of bits */
2550         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2551                                                                 attn_bits);
2552         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2553                                                                 attn_bits_ack);
2554         u32 attn_state = bp->attn_state;
2555
2556         /* look for changed bits */
2557         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2558         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2559
2560         DP(NETIF_MSG_HW,
2561            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2562            attn_bits, attn_ack, asserted, deasserted);
2563
2564         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2565                 BNX2X_ERR("BAD attention state\n");
2566
2567         /* handle bits that were raised */
2568         if (asserted)
2569                 bnx2x_attn_int_asserted(bp, asserted);
2570
2571         if (deasserted)
2572                 bnx2x_attn_int_deasserted(bp, deasserted);
2573 }
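
/*
 * Illustrative example (not part of the driver) of the per-bit
 * assert/deassert derivation above:
 *
 *   asserted   =  attn_bits & ~attn_ack & ~attn_state
 *   deasserted = ~attn_bits &  attn_ack &  attn_state
 *
 * With hypothetical values attn_bits = 0x5, attn_ack = 0x3 and
 * attn_state = 0x3: asserted = 0x4 (bit 2 newly raised) and
 * deasserted = 0x2 (bit 1 dropped after having been acked).
 */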
2574
2575 static void bnx2x_sp_task(struct work_struct *work)
2576 {
2577         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2578         u16 status;
2579
2580         /* Return here if interrupt is disabled */
2581         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2582                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2583                 return;
2584         }
2585
2586         status = bnx2x_update_dsb_idx(bp);
2587 /*      if (status == 0)                                     */
2588 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2589
2590         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2591
2592         /* HW attentions */
2593         if (status & 0x1) {
2594                 bnx2x_attn_int(bp);
2595                 status &= ~0x1;
2596         }
2597
2598         /* CStorm events: STAT_QUERY */
2599         if (status & 0x2) {
2600                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2601                 status &= ~0x2;
2602         }
2603
2604         if (unlikely(status))
2605                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2606                    status);
2607
2608         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2609                      IGU_INT_NOP, 1);
2610         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2611                      IGU_INT_NOP, 1);
2612         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2613                      IGU_INT_NOP, 1);
2614         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2615                      IGU_INT_NOP, 1);
2616         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2617                      IGU_INT_ENABLE, 1);
2618 }
2619
2620 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2621 {
2622         struct net_device *dev = dev_instance;
2623         struct bnx2x *bp = netdev_priv(dev);
2624
2625         /* Return here if interrupt is disabled */
2626         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2627                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2628                 return IRQ_HANDLED;
2629         }
2630
2631         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2632
2633 #ifdef BNX2X_STOP_ON_ERROR
2634         if (unlikely(bp->panic))
2635                 return IRQ_HANDLED;
2636 #endif
2637
2638 #ifdef BCM_CNIC
2639         {
2640                 struct cnic_ops *c_ops;
2641
2642                 rcu_read_lock();
2643                 c_ops = rcu_dereference(bp->cnic_ops);
2644                 if (c_ops)
2645                         c_ops->cnic_handler(bp->cnic_data, NULL);
2646                 rcu_read_unlock();
2647         }
2648 #endif
2649         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2650
2651         return IRQ_HANDLED;
2652 }
2653
2654 /* end of slow path */
2655
2656 static void bnx2x_timer(unsigned long data)
2657 {
2658         struct bnx2x *bp = (struct bnx2x *) data;
2659
2660         if (!netif_running(bp->dev))
2661                 return;
2662
2663         if (atomic_read(&bp->intr_sem) != 0)
2664                 goto timer_restart;
2665
2666         if (poll) {
2667                 struct bnx2x_fastpath *fp = &bp->fp[0];
2668                 int rc;
2669
2670                 bnx2x_tx_int(fp);
2671                 rc = bnx2x_rx_int(fp, 1000);
2672         }
2673
2674         if (!BP_NOMCP(bp)) {
2675                 int func = BP_FUNC(bp);
2676                 u32 drv_pulse;
2677                 u32 mcp_pulse;
2678
2679                 ++bp->fw_drv_pulse_wr_seq;
2680                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2681                 /* TBD - add SYSTEM_TIME */
2682                 drv_pulse = bp->fw_drv_pulse_wr_seq;
2683                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2684
2685                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2686                              MCP_PULSE_SEQ_MASK);
2687                 /* The delta between driver pulse and mcp response
2688                  * should be 1 (before mcp response) or 0 (after mcp response)
2689                  */
2690                 if ((drv_pulse != mcp_pulse) &&
2691                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2692                         /* someone lost a heartbeat... */
2693                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2694                                   drv_pulse, mcp_pulse);
2695                 }
2696         }
2697
2698         if (bp->state == BNX2X_STATE_OPEN)
2699                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2700
2701 timer_restart:
2702         mod_timer(&bp->timer, jiffies + bp->current_interval);
2703 }
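
/*
 * Illustrative sketch (not part of the driver) of the heartbeat check
 * in bnx2x_timer() above: with sequence arithmetic masked to
 * MCP_PULSE_SEQ_MASK, a healthy pair differs by 1 (MCP response still
 * pending) or 0 (MCP already responded); anything else means a lost
 * heartbeat.  The helper name is hypothetical.
 */
static inline int example_pulse_ok(u32 drv_pulse, u32 mcp_pulse)
{
        return (drv_pulse == mcp_pulse) ||
               (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}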
2704
2705 /* end of Statistics */
2706
2707 /* nic init */
2708
2709 /*
2710  * nic init service functions
2711  */
2712
2713 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2714 {
2715         int port = BP_PORT(bp);
2716
2717         /* "CSTORM" */
2718         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2719                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2720                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2721         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2722                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2723                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2724 }
2725
2726 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2727                           dma_addr_t mapping, int sb_id)
2728 {
2729         int port = BP_PORT(bp);
2730         int func = BP_FUNC(bp);
2731         int index;
2732         u64 section;
2733
2734         /* USTORM */
2735         section = ((u64)mapping) + offsetof(struct host_status_block,
2736                                             u_status_block);
2737         sb->u_status_block.status_block_id = sb_id;
2738
2739         REG_WR(bp, BAR_CSTRORM_INTMEM +
2740                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2741         REG_WR(bp, BAR_CSTRORM_INTMEM +
2742                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2743                U64_HI(section));
2744         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2745                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2746
2747         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2748                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2749                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2750
2751         /* CSTORM */
2752         section = ((u64)mapping) + offsetof(struct host_status_block,
2753                                             c_status_block);
2754         sb->c_status_block.status_block_id = sb_id;
2755
2756         REG_WR(bp, BAR_CSTRORM_INTMEM +
2757                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2758         REG_WR(bp, BAR_CSTRORM_INTMEM +
2759                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2760                U64_HI(section));
2761         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2762                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2763
2764         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2765                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2766                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2767
2768         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2769 }
2770
2771 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2772 {
2773         int func = BP_FUNC(bp);
2774
2775         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2776                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2777                         sizeof(struct tstorm_def_status_block)/4);
2778         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2779                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2780                         sizeof(struct cstorm_def_status_block_u)/4);
2781         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2782                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2783                         sizeof(struct cstorm_def_status_block_c)/4);
2784         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2785                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2786                         sizeof(struct xstorm_def_status_block)/4);
2787 }
2788
2789 static void bnx2x_init_def_sb(struct bnx2x *bp,
2790                               struct host_def_status_block *def_sb,
2791                               dma_addr_t mapping, int sb_id)
2792 {
2793         int port = BP_PORT(bp);
2794         int func = BP_FUNC(bp);
2795         int index, val, reg_offset;
2796         u64 section;
2797
2798         /* ATTN */
2799         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2800                                             atten_status_block);
2801         def_sb->atten_status_block.status_block_id = sb_id;
2802
2803         bp->attn_state = 0;
2804
2805         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2806                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2807
2808         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2809                 bp->attn_group[index].sig[0] = REG_RD(bp,
2810                                                      reg_offset + 0x10*index);
2811                 bp->attn_group[index].sig[1] = REG_RD(bp,
2812                                                reg_offset + 0x4 + 0x10*index);
2813                 bp->attn_group[index].sig[2] = REG_RD(bp,
2814                                                reg_offset + 0x8 + 0x10*index);
2815                 bp->attn_group[index].sig[3] = REG_RD(bp,
2816                                                reg_offset + 0xc + 0x10*index);
2817         }
2818
2819         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2820                              HC_REG_ATTN_MSG0_ADDR_L);
2821
2822         REG_WR(bp, reg_offset, U64_LO(section));
2823         REG_WR(bp, reg_offset + 4, U64_HI(section));
2824
2825         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2826
2827         val = REG_RD(bp, reg_offset);
2828         val |= sb_id;
2829         REG_WR(bp, reg_offset, val);
2830
2831         /* USTORM */
2832         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2833                                             u_def_status_block);
2834         def_sb->u_def_status_block.status_block_id = sb_id;
2835
2836         REG_WR(bp, BAR_CSTRORM_INTMEM +
2837                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2838         REG_WR(bp, BAR_CSTRORM_INTMEM +
2839                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2840                U64_HI(section));
2841         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2842                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2843
2844         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2845                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2846                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2847
2848         /* CSTORM */
2849         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2850                                             c_def_status_block);
2851         def_sb->c_def_status_block.status_block_id = sb_id;
2852
2853         REG_WR(bp, BAR_CSTRORM_INTMEM +
2854                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2855         REG_WR(bp, BAR_CSTRORM_INTMEM +
2856                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2857                U64_HI(section));
2858         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2859                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2860
2861         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2862                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2863                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2864
2865         /* TSTORM */
2866         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2867                                             t_def_status_block);
2868         def_sb->t_def_status_block.status_block_id = sb_id;
2869
2870         REG_WR(bp, BAR_TSTRORM_INTMEM +
2871                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2872         REG_WR(bp, BAR_TSTRORM_INTMEM +
2873                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2874                U64_HI(section));
2875         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2876                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2877
2878         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2879                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2880                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2881
2882         /* XSTORM */
2883         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2884                                             x_def_status_block);
2885         def_sb->x_def_status_block.status_block_id = sb_id;
2886
2887         REG_WR(bp, BAR_XSTRORM_INTMEM +
2888                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2889         REG_WR(bp, BAR_XSTRORM_INTMEM +
2890                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2891                U64_HI(section));
2892         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2893                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2894
2895         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2896                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2897                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2898
2899         bp->stats_pending = 0;
2900         bp->set_mac_pending = 0;
2901
2902         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2903 }
2904
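/* Program the HC timeouts for each queue's Rx and Tx CQ indices from
 * rx_ticks/tx_ticks; a zero timeout sets the HC_DISABLE flag instead
 */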
2905 void bnx2x_update_coalesce(struct bnx2x *bp)
2906 {
2907         int port = BP_PORT(bp);
2908         int i;
2909
2910         for_each_queue(bp, i) {
2911                 int sb_id = bp->fp[i].sb_id;
2912
2913                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2914                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2915                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2916                                                       U_SB_ETH_RX_CQ_INDEX),
2917                         bp->rx_ticks/(4 * BNX2X_BTR));
2918                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2919                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2920                                                        U_SB_ETH_RX_CQ_INDEX),
2921                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2922
2923                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2924                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2925                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2926                                                       C_SB_ETH_TX_CQ_INDEX),
2927                         bp->tx_ticks/(4 * BNX2X_BTR));
2928                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2929                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2930                                                        C_SB_ETH_TX_CQ_INDEX),
2931                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2932         }
2933 }
2934
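/* Initialize the slowpath queue and publish its page base to the XSTORM */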
2935 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2936 {
2937         int func = BP_FUNC(bp);
2938
2939         spin_lock_init(&bp->spq_lock);
2940
2941         bp->spq_left = MAX_SPQ_PENDING;
2942         bp->spq_prod_idx = 0;
2943         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2944         bp->spq_prod_bd = bp->spq;
2945         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2946
2947         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2948                U64_LO(bp->spq_mapping));
2949         REG_WR(bp,
2950                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2951                U64_HI(bp->spq_mapping));
2952
2953         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2954                bp->spq_prod_idx);
2955 }
2956
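/* Fill the per-queue ETH contexts: Rx BD/SGE ring bases and client
 * parameters in the USTORM section, Tx BD ring base in the XSTORM section
 */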
2957 static void bnx2x_init_context(struct bnx2x *bp)
2958 {
2959         int i;
2960
2961         /* Rx */
2962         for_each_queue(bp, i) {
2963                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2964                 struct bnx2x_fastpath *fp = &bp->fp[i];
2965                 u8 cl_id = fp->cl_id;
2966
2967                 context->ustorm_st_context.common.sb_index_numbers =
2968                                                 BNX2X_RX_SB_INDEX_NUM;
2969                 context->ustorm_st_context.common.clientId = cl_id;
2970                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2971                 context->ustorm_st_context.common.flags =
2972                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2973                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2974                 context->ustorm_st_context.common.statistics_counter_id =
2975                                                 cl_id;
2976                 context->ustorm_st_context.common.mc_alignment_log_size =
2977                                                 BNX2X_RX_ALIGN_SHIFT;
2978                 context->ustorm_st_context.common.bd_buff_size =
2979                                                 bp->rx_buf_size;
2980                 context->ustorm_st_context.common.bd_page_base_hi =
2981                                                 U64_HI(fp->rx_desc_mapping);
2982                 context->ustorm_st_context.common.bd_page_base_lo =
2983                                                 U64_LO(fp->rx_desc_mapping);
2984                 if (!fp->disable_tpa) {
2985                         context->ustorm_st_context.common.flags |=
2986                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2987                         context->ustorm_st_context.common.sge_buff_size =
2988                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2989                                            0xffff);
2990                         context->ustorm_st_context.common.sge_page_base_hi =
2991                                                 U64_HI(fp->rx_sge_mapping);
2992                         context->ustorm_st_context.common.sge_page_base_lo =
2993                                                 U64_LO(fp->rx_sge_mapping);
2994
2995                         context->ustorm_st_context.common.max_sges_for_packet =
2996                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2997                         context->ustorm_st_context.common.max_sges_for_packet =
2998                                 ((context->ustorm_st_context.common.
2999                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
3000                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3001                 }
3002
3003                 context->ustorm_ag_context.cdu_usage =
3004                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3005                                                CDU_REGION_NUMBER_UCM_AG,
3006                                                ETH_CONNECTION_TYPE);
3007
3008                 context->xstorm_ag_context.cdu_reserved =
3009                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3010                                                CDU_REGION_NUMBER_XCM_AG,
3011                                                ETH_CONNECTION_TYPE);
3012         }
3013
3014         /* Tx */
3015         for_each_queue(bp, i) {
3016                 struct bnx2x_fastpath *fp = &bp->fp[i];
3017                 struct eth_context *context =
3018                         bnx2x_sp(bp, context[i].eth);
3019
3020                 context->cstorm_st_context.sb_index_number =
3021                                                 C_SB_ETH_TX_CQ_INDEX;
3022                 context->cstorm_st_context.status_block_id = fp->sb_id;
3023
3024                 context->xstorm_st_context.tx_bd_page_base_hi =
3025                                                 U64_HI(fp->tx_desc_mapping);
3026                 context->xstorm_st_context.tx_bd_page_base_lo =
3027                                                 U64_LO(fp->tx_desc_mapping);
3028                 context->xstorm_st_context.statistics_data = (fp->cl_id |
3029                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3030         }
3031 }
3032
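/* Populate the RSS indirection table, spreading its entries
 * round-robin over the client IDs of the active queues
 */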
3033 static void bnx2x_init_ind_table(struct bnx2x *bp)
3034 {
3035         int func = BP_FUNC(bp);
3036         int i;
3037
3038         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3039                 return;
3040
3041         DP(NETIF_MSG_IFUP,
3042            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
3043         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3044                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3045                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3046                         bp->fp->cl_id + (i % bp->num_queues));
3047 }
3048
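/* Write the per-client TSTORM config (MTU, statistics and VLAN flags) */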
3049 void bnx2x_set_client_config(struct bnx2x *bp)
3050 {
3051         struct tstorm_eth_client_config tstorm_client = {0};
3052         int port = BP_PORT(bp);
3053         int i;
3054
3055         tstorm_client.mtu = bp->dev->mtu;
3056         tstorm_client.config_flags =
3057                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3058                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3059 #ifdef BCM_VLAN
3060         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3061                 tstorm_client.config_flags |=
3062                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3063                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3064         }
3065 #endif
3066
3067         for_each_queue(bp, i) {
3068                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3069
3070                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3071                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3072                        ((u32 *)&tstorm_client)[0]);
3073                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3074                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3075                        ((u32 *)&tstorm_client)[1]);
3076         }
3077
3078         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3079            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3080 }
3081
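/* Translate the driver Rx mode into TSTORM MAC filters and the NIG LLH mask */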
3082 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3083 {
3084         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3085         int mode = bp->rx_mode;
3086         int mask = bp->rx_mode_cl_mask;
3087         int func = BP_FUNC(bp);
3088         int port = BP_PORT(bp);
3089         int i;
3090         /* All but management unicast packets should pass to the host as well */
3091         u32 llh_mask =
3092                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3093                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3094                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3095                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3096
3097         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
3098
3099         switch (mode) {
3100         case BNX2X_RX_MODE_NONE: /* no Rx */
3101                 tstorm_mac_filter.ucast_drop_all = mask;
3102                 tstorm_mac_filter.mcast_drop_all = mask;
3103                 tstorm_mac_filter.bcast_drop_all = mask;
3104                 break;
3105
3106         case BNX2X_RX_MODE_NORMAL:
3107                 tstorm_mac_filter.bcast_accept_all = mask;
3108                 break;
3109
3110         case BNX2X_RX_MODE_ALLMULTI:
3111                 tstorm_mac_filter.mcast_accept_all = mask;
3112                 tstorm_mac_filter.bcast_accept_all = mask;
3113                 break;
3114
3115         case BNX2X_RX_MODE_PROMISC:
3116                 tstorm_mac_filter.ucast_accept_all = mask;
3117                 tstorm_mac_filter.mcast_accept_all = mask;
3118                 tstorm_mac_filter.bcast_accept_all = mask;
3119                 /* pass management unicast packets as well */
3120                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3121                 break;
3122
3123         default:
3124                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3125                 break;
3126         }
3127
3128         REG_WR(bp,
3129                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3130                llh_mask);
3131
3132         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3133                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3134                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3135                        ((u32 *)&tstorm_mac_filter)[i]);
3136
3137 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3138                    ((u32 *)&tstorm_mac_filter)[i]); */
3139         }
3140
3141         if (mode != BNX2X_RX_MODE_NONE)
3142                 bnx2x_set_client_config(bp);
3143 }
3144
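/* Internal memory init performed once per chip (common load) */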
3145 static void bnx2x_init_internal_common(struct bnx2x *bp)
3146 {
3147         int i;
3148
3149         /* Zero this manually as its initialization is
3150            currently missing in the initTool */
3151         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3152                 REG_WR(bp, BAR_USTRORM_INTMEM +
3153                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
3154 }
3155
3156 static void bnx2x_init_internal_port(struct bnx2x *bp)
3157 {
3158         int port = BP_PORT(bp);
3159
3160         REG_WR(bp,
3161                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3162         REG_WR(bp,
3163                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3164         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3165         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3166 }
3167
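/* Per-function internal memory init: RSS/TPA configuration, statistics
 * contexts, CQE page bases and aggregation sizes, dropless flow control
 * thresholds and, on E1H MF, rate shaping and fairness
 */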
3168 static void bnx2x_init_internal_func(struct bnx2x *bp)
3169 {
3170         struct tstorm_eth_function_common_config tstorm_config = {0};
3171         struct stats_indication_flags stats_flags = {0};
3172         int port = BP_PORT(bp);
3173         int func = BP_FUNC(bp);
3174         int i, j;
3175         u32 offset;
3176         u16 max_agg_size;
3177
3178         tstorm_config.config_flags = RSS_FLAGS(bp);
3179
3180         if (is_multi(bp))
3181                 tstorm_config.rss_result_mask = MULTI_MASK;
3182
3183         /* Enable TPA if needed */
3184         if (bp->flags & TPA_ENABLE_FLAG)
3185                 tstorm_config.config_flags |=
3186                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3187
3188         if (IS_E1HMF(bp))
3189                 tstorm_config.config_flags |=
3190                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3191
3192         tstorm_config.leading_client_id = BP_L_ID(bp);
3193
3194         REG_WR(bp, BAR_TSTRORM_INTMEM +
3195                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3196                (*(u32 *)&tstorm_config));
3197
3198         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3199         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3200         bnx2x_set_storm_rx_mode(bp);
3201
3202         for_each_queue(bp, i) {
3203                 u8 cl_id = bp->fp[i].cl_id;
3204
3205                 /* reset xstorm per client statistics */
3206                 offset = BAR_XSTRORM_INTMEM +
3207                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3208                 for (j = 0;
3209                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3210                         REG_WR(bp, offset + j*4, 0);
3211
3212                 /* reset tstorm per client statistics */
3213                 offset = BAR_TSTRORM_INTMEM +
3214                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3215                 for (j = 0;
3216                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3217                         REG_WR(bp, offset + j*4, 0);
3218
3219                 /* reset ustorm per client statistics */
3220                 offset = BAR_USTRORM_INTMEM +
3221                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3222                 for (j = 0;
3223                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3224                         REG_WR(bp, offset + j*4, 0);
3225         }
3226
3227         /* Init statistics related context */
3228         stats_flags.collect_eth = 1;
3229
3230         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3231                ((u32 *)&stats_flags)[0]);
3232         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3233                ((u32 *)&stats_flags)[1]);
3234
3235         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3236                ((u32 *)&stats_flags)[0]);
3237         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3238                ((u32 *)&stats_flags)[1]);
3239
3240         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3241                ((u32 *)&stats_flags)[0]);
3242         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3243                ((u32 *)&stats_flags)[1]);
3244
3245         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3246                ((u32 *)&stats_flags)[0]);
3247         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3248                ((u32 *)&stats_flags)[1]);
3249
3250         REG_WR(bp, BAR_XSTRORM_INTMEM +
3251                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3252                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3253         REG_WR(bp, BAR_XSTRORM_INTMEM +
3254                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3255                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3256
3257         REG_WR(bp, BAR_TSTRORM_INTMEM +
3258                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3259                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3260         REG_WR(bp, BAR_TSTRORM_INTMEM +
3261                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3262                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3263
3264         REG_WR(bp, BAR_USTRORM_INTMEM +
3265                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3266                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3267         REG_WR(bp, BAR_USTRORM_INTMEM +
3268                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3269                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3270
3271         if (CHIP_IS_E1H(bp)) {
3272                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3273                         IS_E1HMF(bp));
3274                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3275                         IS_E1HMF(bp));
3276                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3277                         IS_E1HMF(bp));
3278                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3279                         IS_E1HMF(bp));
3280
3281                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3282                          bp->e1hov);
3283         }
3284
3285         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
3286         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3287                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
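        /* e.g. assuming 4K SGE pages, 2 pages per SGE and MAX_SKB_FRAGS >= 8:
         * min(8 * 4096 * 2, 0xffff) = 0xffff
         */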
3288         for_each_queue(bp, i) {
3289                 struct bnx2x_fastpath *fp = &bp->fp[i];
3290
3291                 REG_WR(bp, BAR_USTRORM_INTMEM +
3292                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3293                        U64_LO(fp->rx_comp_mapping));
3294                 REG_WR(bp, BAR_USTRORM_INTMEM +
3295                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3296                        U64_HI(fp->rx_comp_mapping));
3297
3298                 /* Next page */
3299                 REG_WR(bp, BAR_USTRORM_INTMEM +
3300                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3301                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3302                 REG_WR(bp, BAR_USTRORM_INTMEM +
3303                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3304                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3305
3306                 REG_WR16(bp, BAR_USTRORM_INTMEM +
3307                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3308                          max_agg_size);
3309         }
3310
3311         /* dropless flow control */
3312         if (CHIP_IS_E1H(bp)) {
3313                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3314
3315                 rx_pause.bd_thr_low = 250;
3316                 rx_pause.cqe_thr_low = 250;
3317                 rx_pause.cos = 1;
3318                 rx_pause.sge_thr_low = 0;
3319                 rx_pause.bd_thr_high = 350;
3320                 rx_pause.cqe_thr_high = 350;
3321                 rx_pause.sge_thr_high = 0;
3322
3323                 for_each_queue(bp, i) {
3324                         struct bnx2x_fastpath *fp = &bp->fp[i];
3325
3326                         if (!fp->disable_tpa) {
3327                                 rx_pause.sge_thr_low = 150;
3328                                 rx_pause.sge_thr_high = 250;
3329                         }
3330
3331
3332                         offset = BAR_USTRORM_INTMEM +
3333                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3334                                                                    fp->cl_id);
3335                         for (j = 0;
3336                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3337                              j++)
3338                                 REG_WR(bp, offset + j*4,
3339                                        ((u32 *)&rx_pause)[j]);
3340                 }
3341         }
3342
3343         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3344
3345         /* Init rate shaping and fairness contexts */
3346         if (IS_E1HMF(bp)) {
3347                 int vn;
3348
3349                 /* During init there is no active link.
3350                    Until link is up, set link rate to 10Gbps */
3351                 bp->link_vars.line_speed = SPEED_10000;
3352                 bnx2x_init_port_minmax(bp);
3353
3354                 if (!BP_NOMCP(bp))
3355                         bp->mf_config =
3356                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3357                 bnx2x_calc_vn_weight_sum(bp);
3358
3359                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3360                         bnx2x_init_vn_minmax(bp, 2*vn + port);
3361
3362                 /* Enable rate shaping and fairness */
3363                 bp->cmng.flags.cmng_enables |=
3364                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3365
3366         } else {
3367                 /* rate shaping and fairness are disabled */
3368                 DP(NETIF_MSG_IFUP,
3369                    "single function mode  minmax will be disabled\n");
3370         }
3371
3372
3373         /* Store cmng structures to internal memory */
3374         if (bp->port.pmf)
3375                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3376                         REG_WR(bp, BAR_XSTRORM_INTMEM +
3377                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3378                                ((u32 *)(&bp->cmng))[i]);
3379 }
3380
3381 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3382 {
3383         switch (load_code) {
3384         case FW_MSG_CODE_DRV_LOAD_COMMON:
3385                 bnx2x_init_internal_common(bp);
3386                 /* no break */
3387
3388         case FW_MSG_CODE_DRV_LOAD_PORT:
3389                 bnx2x_init_internal_port(bp);
3390                 /* no break */
3391
3392         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3393                 bnx2x_init_internal_func(bp);
3394                 break;
3395
3396         default:
3397                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3398                 break;
3399         }
3400 }
3401
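/* Top-level NIC init: fastpath and default status blocks, rings,
 * contexts and internal memories; interrupts are enabled at the end
 */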
3402 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3403 {
3404         int i;
3405
3406         for_each_queue(bp, i) {
3407                 struct bnx2x_fastpath *fp = &bp->fp[i];
3408
3409                 fp->bp = bp;
3410                 fp->state = BNX2X_FP_STATE_CLOSED;
3411                 fp->index = i;
3412                 fp->cl_id = BP_L_ID(bp) + i;
3413 #ifdef BCM_CNIC
3414                 fp->sb_id = fp->cl_id + 1;
3415 #else
3416                 fp->sb_id = fp->cl_id;
3417 #endif
3418                 DP(NETIF_MSG_IFUP,
3419                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
3420                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3421                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3422                               fp->sb_id);
3423                 bnx2x_update_fpsb_idx(fp);
3424         }
3425
3426         /* ensure status block indices were read */
3427         rmb();
3428
3429
3430         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3431                           DEF_SB_ID);
3432         bnx2x_update_dsb_idx(bp);
3433         bnx2x_update_coalesce(bp);
3434         bnx2x_init_rx_rings(bp);
3435         bnx2x_init_tx_ring(bp);
3436         bnx2x_init_sp_ring(bp);
3437         bnx2x_init_context(bp);
3438         bnx2x_init_internal(bp, load_code);
3439         bnx2x_init_ind_table(bp);
3440         bnx2x_stats_init(bp);
3441
3442         /* At this point, we are ready for interrupts */
3443         atomic_set(&bp->intr_sem, 0);
3444
3445         /* flush all before enabling interrupts */
3446         mb();
3447         mmiowb();
3448
3449         bnx2x_int_enable(bp);
3450
3451         /* Check for SPIO5 */
3452         bnx2x_attn_int_deasserted0(bp,
3453                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3454                                    AEU_INPUTS_ATTN_BITS_SPIO5);
3455 }
3456
3457 /* end of nic init */
3458
3459 /*
3460  * gzip service functions
3461  */
3462
3463 static int bnx2x_gunzip_init(struct bnx2x *bp)
3464 {
3465         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3466                                             &bp->gunzip_mapping, GFP_KERNEL);
3467         if (bp->gunzip_buf  == NULL)
3468                 goto gunzip_nomem1;
3469
3470         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3471         if (bp->strm  == NULL)
3472                 goto gunzip_nomem2;
3473
3474         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3475                                       GFP_KERNEL);
3476         if (bp->strm->workspace == NULL)
3477                 goto gunzip_nomem3;
3478
3479         return 0;
3480
3481 gunzip_nomem3:
3482         kfree(bp->strm);
3483         bp->strm = NULL;
3484
3485 gunzip_nomem2:
3486         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3487                           bp->gunzip_mapping);
3488         bp->gunzip_buf = NULL;
3489
3490 gunzip_nomem1:
3491         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3492                " decompression\n");
3493         return -ENOMEM;
3494 }
3495
3496 static void bnx2x_gunzip_end(struct bnx2x *bp)
3497 {
3498         kfree(bp->strm->workspace);
3499
3500         kfree(bp->strm);
3501         bp->strm = NULL;
3502
3503         if (bp->gunzip_buf) {
3504                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3505                                   bp->gunzip_mapping);
3506                 bp->gunzip_buf = NULL;
3507         }
3508 }
3509
3510 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3511 {
3512         int n, rc;
3513
3514         /* check gzip header */
3515         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3516                 BNX2X_ERR("Bad gzip header\n");
3517                 return -EINVAL;
3518         }
3519
3520         n = 10;
3521
3522 #define FNAME                           0x8
3523
3524         if (zbuf[3] & FNAME)
3525                 while ((n < len) && (zbuf[n++] != 0));
3526
3527         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3528         bp->strm->avail_in = len - n;
3529         bp->strm->next_out = bp->gunzip_buf;
3530         bp->strm->avail_out = FW_BUF_SIZE;
3531
3532         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3533         if (rc != Z_OK)
3534                 return rc;
3535
3536         rc = zlib_inflate(bp->strm, Z_FINISH);
3537         if ((rc != Z_OK) && (rc != Z_STREAM_END))
3538                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3539                            bp->strm->msg);
3540
3541         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3542         if (bp->gunzip_outlen & 0x3)
3543                 netdev_err(bp->dev, "Firmware decompression error:"
3544                                     " gunzip_outlen (%d) not aligned\n",
3545                                 bp->gunzip_outlen);
3546         bp->gunzip_outlen >>= 2;
3547
3548         zlib_inflateEnd(bp->strm);
3549
3550         if (rc == Z_STREAM_END)
3551                 return 0;
3552
3553         return rc;
3554 }
3555
3556 /* nic load/unload */
3557
3558 /*
3559  * General service functions
3560  */
3561
3562 /* send a NIG loopback debug packet */
3563 static void bnx2x_lb_pckt(struct bnx2x *bp)
3564 {
3565         u32 wb_write[3];
3566
3567         /* Ethernet source and destination addresses */
3568         wb_write[0] = 0x55555555;
3569         wb_write[1] = 0x55555555;
3570         wb_write[2] = 0x20;             /* SOP */
3571         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3572
3573         /* NON-IP protocol */
3574         wb_write[0] = 0x09000000;
3575         wb_write[1] = 0x55555555;
3576         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
3577         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3578 }
3579
3580 /* Some of the internal memories
3581  * are not directly readable from the driver.
3582  * To test them we send debug packets.
3583  */
3584 static int bnx2x_int_mem_test(struct bnx2x *bp)
3585 {
3586         int factor;
3587         int count, i;
3588         u32 val = 0;
3589
3590         if (CHIP_REV_IS_FPGA(bp))
3591                 factor = 120;
3592         else if (CHIP_REV_IS_EMUL(bp))
3593                 factor = 200;
3594         else
3595                 factor = 1;
3596
3597         DP(NETIF_MSG_HW, "start part1\n");
3598
3599         /* Disable inputs of parser neighbor blocks */
3600         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3601         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3602         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3603         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3604
3605         /*  Write 0 to parser credits for CFC search request */
3606         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3607
3608         /* send Ethernet packet */
3609         bnx2x_lb_pckt(bp);
3610
3611         /* TODO: should the NIG statistics be reset here? */
3612         /* Wait until NIG register shows 1 packet of size 0x10 */
3613         count = 1000 * factor;
3614         while (count) {
3615
3616                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3617                 val = *bnx2x_sp(bp, wb_data[0]);
3618                 if (val == 0x10)
3619                         break;
3620
3621                 msleep(10);
3622                 count--;
3623         }
3624         if (val != 0x10) {
3625                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3626                 return -1;
3627         }
3628
3629         /* Wait until PRS register shows 1 packet */
3630         count = 1000 * factor;
3631         while (count) {
3632                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3633                 if (val == 1)
3634                         break;
3635
3636                 msleep(10);
3637                 count--;
3638         }
3639         if (val != 0x1) {
3640                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3641                 return -2;
3642         }
3643
3644         /* Reset and init BRB, PRS */
3645         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3646         msleep(50);
3647         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3648         msleep(50);
3649         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3650         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3651
3652         DP(NETIF_MSG_HW, "part2\n");
3653
3654         /* Disable inputs of parser neighbor blocks */
3655         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3656         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3657         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3658         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3659
3660         /* Write 0 to parser credits for CFC search request */
3661         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3662
3663         /* send 10 Ethernet packets */
3664         for (i = 0; i < 10; i++)
3665                 bnx2x_lb_pckt(bp);
3666
3667         /* Wait until NIG register shows 10 + 1
3668            packets of size 11*0x10 = 0xb0 */
3669         count = 1000 * factor;
3670         while (count) {
3671
3672                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3673                 val = *bnx2x_sp(bp, wb_data[0]);
3674                 if (val == 0xb0)
3675                         break;
3676
3677                 msleep(10);
3678                 count--;
3679         }
3680         if (val != 0xb0) {
3681                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3682                 return -3;
3683         }
3684
3685         /* Wait until PRS register shows 2 packets */
3686         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3687         if (val != 2)
3688                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3689
3690         /* Write 1 to parser credits for CFC search request */
3691         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3692
3693         /* Wait until PRS register shows 3 packets */
3694         msleep(10 * factor);
3695         /* the PRS packet counter should show 3 packets by now */
3696         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3697         if (val != 3)
3698                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3699
3700         /* clear NIG EOP FIFO */
3701         for (i = 0; i < 11; i++)
3702                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3703         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3704         if (val != 1) {
3705                 BNX2X_ERR("clear of NIG failed\n");
3706                 return -4;
3707         }
3708
3709         /* Reset and init BRB, PRS, NIG */
3710         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3711         msleep(50);
3712         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3713         msleep(50);
3714         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3715         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3716 #ifndef BCM_CNIC
3717         /* set NIC mode */
3718         REG_WR(bp, PRS_REG_NIC_MODE, 1);
3719 #endif
3720
3721         /* Enable inputs of parser neighbor blocks */
3722         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3723         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3724         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3725         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3726
3727         DP(NETIF_MSG_HW, "done\n");
3728
3729         return 0; /* OK */
3730 }
3731
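/* Unmask the attention interrupts of the HW blocks (0 = unmasked);
 * the commented-out SEM and MISC masks are left untouched
 */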
3732 static void enable_blocks_attention(struct bnx2x *bp)
3733 {
3734         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3735         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3736         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3737         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3738         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3739         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3740         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3741         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3742         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3743 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3744 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3745         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3746         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3747         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3748 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3749 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3750         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3751         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3752         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3753         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3754 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3755 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3756         if (CHIP_REV_IS_FPGA(bp))
3757                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3758         else
3759                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3760         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3761         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3762         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3763 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3764 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3765         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3766         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3767 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3768         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3, 4 masked */
3769 }
3770
3771 static const struct {
3772         u32 addr;
3773         u32 mask;
3774 } bnx2x_parity_mask[] = {
3775         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3776         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3777         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3778         {HC_REG_HC_PRTY_MASK, 0xffffffff},
3779         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3780         {QM_REG_QM_PRTY_MASK, 0x0},
3781         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3782         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3783         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3784         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3785         {CDU_REG_CDU_PRTY_MASK, 0x0},
3786         {CFC_REG_CFC_PRTY_MASK, 0x0},
3787         {DBG_REG_DBG_PRTY_MASK, 0x0},
3788         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3789         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3790         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3791         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3792         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3793         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3794         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3795         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3796         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3797         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3798         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3799         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3800         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3801         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3802         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3803 };
3804
3805 static void enable_blocks_parity(struct bnx2x *bp)
3806 {
3807         int i, mask_arr_len =
3808                 ARRAY_SIZE(bnx2x_parity_mask);
3809
3810         for (i = 0; i < mask_arr_len; i++)
3811                 REG_WR(bp, bnx2x_parity_mask[i].addr,
3812                         bnx2x_parity_mask[i].mask);
3813 }
3814
3815
3816 static void bnx2x_reset_common(struct bnx2x *bp)
3817 {
3818         /* reset_common */
3819         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3820                0xd3ffff7f);
3821         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3822 }
3823
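/* Derive the PXP read/write ordering from the PCIe DEVCTL register
 * (the mrrs module parameter, if set, overrides the read order)
 */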
3824 static void bnx2x_init_pxp(struct bnx2x *bp)
3825 {
3826         u16 devctl;
3827         int r_order, w_order;
3828
3829         pci_read_config_word(bp->pdev,
3830                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3831         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3832         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3833         if (bp->mrrs == -1)
3834                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3835         else {
3836                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3837                 r_order = bp->mrrs;
3838         }
3839
3840         bnx2x_init_pxp_arb(bp, r_order, w_order);
3841 }
3842
3843 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3844 {
3845         int is_required;
3846         u32 val;
3847         int port;
3848
3849         if (BP_NOMCP(bp))
3850                 return;
3851
3852         is_required = 0;
3853         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3854               SHARED_HW_CFG_FAN_FAILURE_MASK;
3855
3856         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3857                 is_required = 1;
3858
3859         /*
3860          * The fan failure mechanism is usually related to the PHY type since
3861          * the power consumption of the board is affected by the PHY. Currently,
3862          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3863          */
3864         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3865                 for (port = PORT_0; port < PORT_MAX; port++) {
3866                         u32 phy_type =
3867                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
3868                                          external_phy_config) &
3869                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3870                         is_required |=
3871                                 ((phy_type ==
3872                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3873                                  (phy_type ==
3874                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3875                                  (phy_type ==
3876                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3877                 }
3878
3879         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3880
3881         if (is_required == 0)
3882                 return;
3883
3884         /* Fan failure is indicated by SPIO 5 */
3885         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3886                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
3887
3888         /* set to active low mode */
3889         val = REG_RD(bp, MISC_REG_SPIO_INT);
3890         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3891                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3892         REG_WR(bp, MISC_REG_SPIO_INT, val);
3893
3894         /* enable interrupt to signal the IGU */
3895         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3896         val |= (1 << MISC_REGISTERS_SPIO_5);
3897         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3898 }
3899
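/* First stage of HW init, performed once per chip: take the blocks out
 * of reset and initialize them in COMMON_STAGE order
 */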
3900 static int bnx2x_init_common(struct bnx2x *bp)
3901 {
3902         u32 val, i;
3903 #ifdef BCM_CNIC
3904         u32 wb_write[2];
3905 #endif
3906
3907         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
3908
3909         bnx2x_reset_common(bp);
3910         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3911         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3912
3913         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3914         if (CHIP_IS_E1H(bp))
3915                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3916
3917         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3918         msleep(30);
3919         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3920
3921         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3922         if (CHIP_IS_E1(bp)) {
3923                 /* enable HW interrupt from PXP on USDM overflow
3924                    bit 16 on INT_MASK_0 */
3925                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3926         }
3927
3928         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3929         bnx2x_init_pxp(bp);
3930
3931 #ifdef __BIG_ENDIAN
3932         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3933         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3934         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3935         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3936         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3937         /* make sure this value is 0 */
3938         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3939
3940 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3941         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3942         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3943         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3944         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3945 #endif
3946
3947         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3948 #ifdef BCM_CNIC
3949         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3950         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3951         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3952 #endif
3953
3954         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3955                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3956
3957         /* let the HW do its magic ... */
3958         msleep(100);
3959         /* finish PXP init */
3960         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3961         if (val != 1) {
3962                 BNX2X_ERR("PXP2 CFG failed\n");
3963                 return -EBUSY;
3964         }
3965         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3966         if (val != 1) {
3967                 BNX2X_ERR("PXP2 RD_INIT failed\n");
3968                 return -EBUSY;
3969         }
3970
3971         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3972         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3973
3974         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3975
3976         /* clean the DMAE memory */
3977         bp->dmae_ready = 1;
3978         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3979
3980         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3981         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3982         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3983         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3984
3985         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3986         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3987         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3988         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3989
3990         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3991
3992 #ifdef BCM_CNIC
3993         wb_write[0] = 0;
3994         wb_write[1] = 0;
3995         for (i = 0; i < 64; i++) {
3996                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3997                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3998
3999                 if (CHIP_IS_E1H(bp)) {
4000                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4001                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4002                                           wb_write, 2);
4003                 }
4004         }
4005 #endif
4006         /* soft reset pulse */
4007         REG_WR(bp, QM_REG_SOFT_RESET, 1);
4008         REG_WR(bp, QM_REG_SOFT_RESET, 0);
4009
4010 #ifdef BCM_CNIC
4011         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4012 #endif
4013
4014         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4015         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4016         if (!CHIP_REV_IS_SLOW(bp)) {
4017                 /* enable hw interrupt from doorbell Q */
4018                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4019         }
4020
4021         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4022         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4023         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4024 #ifndef BCM_CNIC
4025         /* set NIC mode */
4026         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4027 #endif
4028         if (CHIP_IS_E1H(bp))
4029                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4030
4031         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4032         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4033         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4034         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4035
4036         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4039         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4040
4041         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4042         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4043         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4044         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4045
4046         /* sync semi rtc */
4047         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4048                0x80000000);
4049         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4050                0x80000000);
4051
4052         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4053         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4054         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4055
4056         REG_WR(bp, SRC_REG_SOFT_RST, 1);
4057         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4058                 REG_WR(bp, i, random32());
4059         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4060 #ifdef BCM_CNIC
4061         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4062         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4063         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4064         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4065         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4066         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4067         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4068         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4069         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4070         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4071 #endif
4072         REG_WR(bp, SRC_REG_SOFT_RST, 0);
4073
4074         if (sizeof(union cdu_context) != 1024)
4075                 /* we currently assume that a context is 1024 bytes */
4076                 dev_alert(&bp->pdev->dev, "please adjust the size "
4077                                           "of cdu_context(%ld)\n",
4078                          (long)sizeof(union cdu_context));
4079
4080         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4081         val = (4 << 24) + (0 << 12) + 1024;
4082         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4083
4084         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4085         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4086         /* enable context validation interrupt from CFC */
4087         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4088
4089         /* set the thresholds to prevent CFC/CDU race */
4090         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4091
4092         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4093         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4094
4095         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4096         /* Reset PCIE errors for debug */
4097         REG_WR(bp, 0x2814, 0xffffffff);
4098         REG_WR(bp, 0x3820, 0xffffffff);
4099
4100         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4101         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4102         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4103         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4104
4105         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4106         if (CHIP_IS_E1H(bp)) {
4107                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4108                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4109         }
4110
4111         if (CHIP_REV_IS_SLOW(bp))
4112                 msleep(200);
4113
4114         /* finish CFC init */
4115         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4116         if (val != 1) {
4117                 BNX2X_ERR("CFC LL_INIT failed\n");
4118                 return -EBUSY;
4119         }
4120         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4121         if (val != 1) {
4122                 BNX2X_ERR("CFC AC_INIT failed\n");
4123                 return -EBUSY;
4124         }
4125         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4126         if (val != 1) {
4127                 BNX2X_ERR("CFC CAM_INIT failed\n");
4128                 return -EBUSY;
4129         }
4130         REG_WR(bp, CFC_REG_DEBUG0, 0);
4131
4132         /* read the NIG statistic
4133            to see if this is the first time the NIC is up since power-up */
4134         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4135         val = *bnx2x_sp(bp, wb_data[0]);
4136
4137         /* do internal memory self test */
4138         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4139                 BNX2X_ERR("internal mem self test failed\n");
4140                 return -EBUSY;
4141         }
4142
4143         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4144         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4145         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4146         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4147         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4148                 bp->port.need_hw_lock = 1;
4149                 break;
4150
4151         default:
4152                 break;
4153         }
4154
4155         bnx2x_setup_fan_failure_detection(bp);
4156
4157         /* clear PXP2 attentions */
4158         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4159
4160         enable_blocks_attention(bp);
4161         if (CHIP_PARITY_SUPPORTED(bp))
4162                 enable_blocks_parity(bp);
4163
4164         if (!BP_NOMCP(bp)) {
4165                 bnx2x_acquire_phy_lock(bp);
4166                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4167                 bnx2x_release_phy_lock(bp);
4168         } else
4169                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
4170
4171         return 0;
4172 }
4173
4174 static int bnx2x_init_port(struct bnx2x *bp)
4175 {
4176         int port = BP_PORT(bp);
4177         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4178         u32 low, high;
4179         u32 val;
4180
4181         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
4182
4183         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4184
4185         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4186         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4187
4188         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4189         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4190         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4191         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4192
4193 #ifdef BCM_CNIC
4194         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4195
4196         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4197         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4198         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4199 #endif
4200
4201         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4202
4203         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4204         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4205                 /* no pause for emulation and FPGA */
4206                 low = 0;
4207                 high = 513;
4208         } else {
4209                 if (IS_E1HMF(bp))
4210                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4211                 else if (bp->dev->mtu > 4096) {
4212                         if (bp->flags & ONE_PORT_FLAG)
4213                                 low = 160;
4214                         else {
4215                                 val = bp->dev->mtu;
4216                                 /* (24*1024 + val*4)/256 */
4217                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4218                         }
4219                 } else
4220                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4221                 high = low + 56;        /* 14*1024/256 */
4222         }
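        /* Example: for mtu 9000 in the non-E1HMF two-port case,
         * low = 96 + 9000/64 + 1 = 237 blocks of 256 bytes (i.e.
         * (24*1024 + 9000*4)/256 rounded up) and high = 237 + 56 = 293.
         */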
4223         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4224         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4225
4226
4227         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4228
4229         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4230         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4231         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4232         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4233
4234         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4235         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4236         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4237         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4238
4239         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4240         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4241
4242         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4243
4244         /* configure PBF to work without PAUSE, mtu 9000 */
4245         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4246
4247         /* update threshold */
4248         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4249         /* update init credit */
4250         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4251
4252         /* probe changes */
4253         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4254         msleep(5);
4255         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4256
4257 #ifdef BCM_CNIC
4258         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4259 #endif
4260         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4261         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4262
4263         if (CHIP_IS_E1(bp)) {
4264                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4265                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4266         }
4267         bnx2x_init_block(bp, HC_BLOCK, init_stage);
4268
4269         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4270         /* init aeu_mask_attn_func_0/1:
4271          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4272          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4273          *             bits 4-7 are used for "per vn group attention" */
4274         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4275                (IS_E1HMF(bp) ? 0xF7 : 0x7));
4276
4277         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4278         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4279         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4280         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4281         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4282
4283         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4284
4285         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4286
4287         if (CHIP_IS_E1H(bp)) {
4288                 /* 0x2 disable e1hov, 0x1 enable */
4289                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4290                        (IS_E1HMF(bp) ? 0x1 : 0x2));
4291
4292                 {
4293                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4294                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4295                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4296                 }
4297         }
4298
4299         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4300         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4301
4302         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4303         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4304                 {
4305                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4306
4307                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4308                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4309
4310                 /* The GPIO should be swapped if the swap register is
4311                    set and active */
4312                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4313                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4314
4315                 /* Select function upon port-swap configuration */
4316                 if (port == 0) {
4317                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4318                         aeu_gpio_mask = (swap_val && swap_override) ?
4319                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4320                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4321                 } else {
4322                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4323                         aeu_gpio_mask = (swap_val && swap_override) ?
4324                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4325                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4326                 }
4327                 val = REG_RD(bp, offset);
4328                 /* add GPIO3 to group */
4329                 val |= aeu_gpio_mask;
4330                 REG_WR(bp, offset, val);
4331                 }
4332                 bp->port.need_hw_lock = 1;
4333                 break;
4334
4335         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4336                 bp->port.need_hw_lock = 1;
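                /* fall through - the 8727 also gets SPIO 5 in group 0 */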
4337         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4338                 /* add SPIO 5 to group 0 */
4339                 {
4340                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4341                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4342                 val = REG_RD(bp, reg_addr);
4343                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4344                 REG_WR(bp, reg_addr, val);
4345                 }
4346                 break;
4347         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4348         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4349                 bp->port.need_hw_lock = 1;
4350                 break;
4351         default:
4352                 break;
4353         }
4354
4355         bnx2x__link_reset(bp);
4356
4357         return 0;
4358 }
4359
4360 #define ILT_PER_FUNC            (768/2)
4361 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
4362 /* the phys address is shifted right 12 bits and has a
4363    1=valid bit added as the 53rd bit;
4364    then, since this is a wide register(TM),
4365    we split it into two 32-bit writes
4366  */
4367 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4368 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
4369 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
4370 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
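/* Example: for addr = 0x0000123456789000ULL,
 * ONCHIP_ADDR1(addr) = 0x23456789 (address bits 12..43) and
 * ONCHIP_ADDR2(addr) = 0x00100001 (address bits 44..63 plus the valid
 * bit at bit 20, i.e. bit 52 of the full ILT entry).
 */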
4371
4372 #ifdef BCM_CNIC
4373 #define CNIC_ILT_LINES          127
4374 #define CNIC_CTX_PER_ILT        16
4375 #else
4376 #define CNIC_ILT_LINES          0
4377 #endif
4378
4379 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4380 {
4381         int reg;
4382
4383         if (CHIP_IS_E1H(bp))
4384                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4385         else /* E1 */
4386                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4387
4388         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4389 }
4390
4391 static int bnx2x_init_func(struct bnx2x *bp)
4392 {
4393         int port = BP_PORT(bp);
4394         int func = BP_FUNC(bp);
4395         u32 addr, val;
4396         int i;
4397
4398         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4399
4400         /* set MSI reconfigure capability */
4401         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4402         val = REG_RD(bp, addr);
4403         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4404         REG_WR(bp, addr, val);
4405
4406         i = FUNC_ILT_BASE(func);
4407
4408         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4409         if (CHIP_IS_E1H(bp)) {
4410                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4411                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4412         } else /* E1 */
4413                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4414                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4415
4416 #ifdef BCM_CNIC
4417         i += 1 + CNIC_ILT_LINES;
4418         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4419         if (CHIP_IS_E1(bp))
4420                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4421         else {
4422                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4423                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4424         }
4425
4426         i++;
4427         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4428         if (CHIP_IS_E1(bp))
4429                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4430         else {
4431                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4432                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4433         }
4434
4435         i++;
4436         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4437         if (CHIP_IS_E1(bp))
4438                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4439         else {
4440                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4441                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4442         }
4443
4444         /* tell the searcher where the T2 table is */
4445         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4446
4447         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4448                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4449
4450         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4451                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4452                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4453
4454         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4455 #endif
4456
4457         if (CHIP_IS_E1H(bp)) {
4458                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4459                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4460                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4461                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4462                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4463                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4464                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4465                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4466                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4467
4468                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4469                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4470         }
4471
4472         /* HC init per function */
4473         if (CHIP_IS_E1H(bp)) {
4474                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4475
4476                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4477                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4478         }
4479         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4480
4481         /* Reset PCIE errors for debug */
4482         REG_WR(bp, 0x2114, 0xffffffff);
4483         REG_WR(bp, 0x2120, 0xffffffff);
4484
4485         return 0;
4486 }
4487
4488 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4489 {
4490         int i, rc = 0;
4491
4492         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
4493            BP_FUNC(bp), load_code);
4494
4495         bp->dmae_ready = 0;
4496         mutex_init(&bp->dmae_mutex);
4497         rc = bnx2x_gunzip_init(bp);
4498         if (rc)
4499                 return rc;
4500
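        /* The cases below deliberately fall through: a COMMON load
         * (first driver instance on the chip) also does PORT and
         * FUNCTION init, and a PORT load also does FUNCTION init.
         */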
4501         switch (load_code) {
4502         case FW_MSG_CODE_DRV_LOAD_COMMON:
4503                 rc = bnx2x_init_common(bp);
4504                 if (rc)
4505                         goto init_hw_err;
4506                 /* no break */
4507
4508         case FW_MSG_CODE_DRV_LOAD_PORT:
4509                 bp->dmae_ready = 1;
4510                 rc = bnx2x_init_port(bp);
4511                 if (rc)
4512                         goto init_hw_err;
4513                 /* no break */
4514
4515         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4516                 bp->dmae_ready = 1;
4517                 rc = bnx2x_init_func(bp);
4518                 if (rc)
4519                         goto init_hw_err;
4520                 break;
4521
4522         default:
4523                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4524                 break;
4525         }
4526
4527         if (!BP_NOMCP(bp)) {
4528                 int func = BP_FUNC(bp);
4529
4530                 bp->fw_drv_pulse_wr_seq =
4531                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4532                                  DRV_PULSE_SEQ_MASK);
4533                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4534         }
4535
4536         /* this needs to be done before gunzip end */
4537         bnx2x_zero_def_sb(bp);
4538         for_each_queue(bp, i)
4539                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4540 #ifdef BCM_CNIC
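        /* i is now one past the last ETH queue, i.e. the CNIC SB index */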
4541         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4542 #endif
4543
4544 init_hw_err:
4545         bnx2x_gunzip_end(bp);
4546
4547         return rc;
4548 }
4549
4550 void bnx2x_free_mem(struct bnx2x *bp)
4551 {
4552
4553 #define BNX2X_PCI_FREE(x, y, size) \
4554         do { \
4555                 if (x) { \
4556                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
4557                         x = NULL; \
4558                         y = 0; \
4559                 } \
4560         } while (0)
4561
4562 #define BNX2X_FREE(x) \
4563         do { \
4564                 if (x) { \
4565                         vfree(x); \
4566                         x = NULL; \
4567                 } \
4568         } while (0)
4569
4570         int i;
4571
4572         /* fastpath */
4573         /* Common */
4574         for_each_queue(bp, i) {
4575
4576                 /* status blocks */
4577                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4578                                bnx2x_fp(bp, i, status_blk_mapping),
4579                                sizeof(struct host_status_block));
4580         }
4581         /* Rx */
4582         for_each_queue(bp, i) {
4583
4584                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4585                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4586                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4587                                bnx2x_fp(bp, i, rx_desc_mapping),
4588                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4589
4590                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4591                                bnx2x_fp(bp, i, rx_comp_mapping),
4592                                sizeof(struct eth_fast_path_rx_cqe) *
4593                                NUM_RCQ_BD);
4594
4595                 /* SGE ring */
4596                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4597                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4598                                bnx2x_fp(bp, i, rx_sge_mapping),
4599                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4600         }
4601         /* Tx */
4602         for_each_queue(bp, i) {
4603
4604                 /* fastpath tx rings: tx_buf tx_desc */
4605                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4606                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4607                                bnx2x_fp(bp, i, tx_desc_mapping),
4608                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4609         }
4610         /* end of fastpath */
4611
4612         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4613                        sizeof(struct host_def_status_block));
4614
4615         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4616                        sizeof(struct bnx2x_slowpath));
4617
4618 #ifdef BCM_CNIC
4619         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4620         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4621         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4622         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4623         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4624                        sizeof(struct host_status_block));
4625 #endif
4626         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4627
4628 #undef BNX2X_PCI_FREE
4629 #undef BNX2X_FREE
4630 }
4631
4632 int bnx2x_alloc_mem(struct bnx2x *bp)
4633 {
4634
4635 #define BNX2X_PCI_ALLOC(x, y, size) \
4636         do { \
4637                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4638                 if (x == NULL) \
4639                         goto alloc_mem_err; \
4640                 memset(x, 0, size); \
4641         } while (0)
4642
4643 #define BNX2X_ALLOC(x, size) \
4644         do { \
4645                 x = vmalloc(size); \
4646                 if (x == NULL) \
4647                         goto alloc_mem_err; \
4648                 memset(x, 0, size); \
4649         } while (0)
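/* Both helpers zero the new buffer and jump to alloc_mem_err on failure,
 * where bnx2x_free_mem() releases whatever was already allocated, so
 * they are only meaningful inside this function.
 */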
4650
4651         int i;
4652
4653         /* fastpath */
4654         /* Common */
4655         for_each_queue(bp, i) {
4656                 bnx2x_fp(bp, i, bp) = bp;
4657
4658                 /* status blocks */
4659                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4660                                 &bnx2x_fp(bp, i, status_blk_mapping),
4661                                 sizeof(struct host_status_block));
4662         }
4663         /* Rx */
4664         for_each_queue(bp, i) {
4665
4666                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4667                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4668                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4669                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4670                                 &bnx2x_fp(bp, i, rx_desc_mapping),
4671                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4672
4673                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4674                                 &bnx2x_fp(bp, i, rx_comp_mapping),
4675                                 sizeof(struct eth_fast_path_rx_cqe) *
4676                                 NUM_RCQ_BD);
4677
4678                 /* SGE ring */
4679                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4680                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4681                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4682                                 &bnx2x_fp(bp, i, rx_sge_mapping),
4683                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4684         }
4685         /* Tx */
4686         for_each_queue(bp, i) {
4687
4688                 /* fastpath tx rings: tx_buf tx_desc */
4689                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4690                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4691                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4692                                 &bnx2x_fp(bp, i, tx_desc_mapping),
4693                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4694         }
4695         /* end of fastpath */
4696
4697         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4698                         sizeof(struct host_def_status_block));
4699
4700         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4701                         sizeof(struct bnx2x_slowpath));
4702
4703 #ifdef BCM_CNIC
4704         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4705
4706         /* allocate searcher T2 table
4707            we allocate 1/4 of alloc num for T2
4708            (which is not entered into the ILT) */
4709         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4710
4711         /* Initialize T2 (for 1024 connections) */
4712         for (i = 0; i < 16*1024; i += 64)
4713                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
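        /* Each 64-byte entry's last 8 bytes hold the physical address
         * of the next entry, so the loop above links the 16K/64 = 256
         * entries into a free list for the searcher.
         */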
4714
4715         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4716         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4717
4718         /* QM queues (128*MAX_CONN) */
4719         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4720
4721         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4722                         sizeof(struct host_status_block));
4723 #endif
4724
4725         /* Slow path ring */
4726         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4727
4728         return 0;
4729
4730 alloc_mem_err:
4731         bnx2x_free_mem(bp);
4732         return -ENOMEM;
4733
4734 #undef BNX2X_PCI_ALLOC
4735 #undef BNX2X_ALLOC
4736 }
4737
4738
4739 /*
4740  * Init service functions
4741  */
4742
4743 /**
4744  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4745  *
4746  * @param bp driver descriptor
4747  * @param set set or clear an entry (1 or 0)
4748  * @param mac pointer to a buffer containing a MAC
4749  * @param cl_bit_vec bit vector of clients to register a MAC for
4750  * @param cam_offset offset in a CAM to use
4751  * @param with_bcast set broadcast MAC as well
4752  */
4753 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4754                                       u32 cl_bit_vec, u8 cam_offset,
4755                                       u8 with_bcast)
4756 {
4757         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4758         int port = BP_PORT(bp);
4759
4760         /* CAM allocation
4761          * unicasts 0-31:port0 32-63:port1
4762          * multicast 64-127:port0 128-191:port1
4763          */
4764         config->hdr.length = 1 + (with_bcast ? 1 : 0);
4765         config->hdr.offset = cam_offset;
4766         config->hdr.client_id = 0xff;
4767         config->hdr.reserved1 = 0;
4768
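        /* The MAC is packed into three byte-swapped u16s; on a
         * little-endian host 00:11:22:33:44:55 yields msb = 0x0011,
         * middle = 0x2233 and lsb = 0x4455.
         */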
4769         /* primary MAC */
4770         config->config_table[0].cam_entry.msb_mac_addr =
4771                                         swab16(*(u16 *)&mac[0]);
4772         config->config_table[0].cam_entry.middle_mac_addr =
4773                                         swab16(*(u16 *)&mac[2]);
4774         config->config_table[0].cam_entry.lsb_mac_addr =
4775                                         swab16(*(u16 *)&mac[4]);
4776         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4777         if (set)
4778                 config->config_table[0].target_table_entry.flags = 0;
4779         else
4780                 CAM_INVALIDATE(config->config_table[0]);
4781         config->config_table[0].target_table_entry.clients_bit_vector =
4782                                                 cpu_to_le32(cl_bit_vec);
4783         config->config_table[0].target_table_entry.vlan_id = 0;
4784
4785         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4786            (set ? "setting" : "clearing"),
4787            config->config_table[0].cam_entry.msb_mac_addr,
4788            config->config_table[0].cam_entry.middle_mac_addr,
4789            config->config_table[0].cam_entry.lsb_mac_addr);
4790
4791         /* broadcast */
4792         if (with_bcast) {
4793                 config->config_table[1].cam_entry.msb_mac_addr =
4794                         cpu_to_le16(0xffff);
4795                 config->config_table[1].cam_entry.middle_mac_addr =
4796                         cpu_to_le16(0xffff);
4797                 config->config_table[1].cam_entry.lsb_mac_addr =
4798                         cpu_to_le16(0xffff);
4799                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4800                 if (set)
4801                         config->config_table[1].target_table_entry.flags =
4802                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4803                 else
4804                         CAM_INVALIDATE(config->config_table[1]);
4805                 config->config_table[1].target_table_entry.clients_bit_vector =
4806                                                         cpu_to_le32(cl_bit_vec);
4807                 config->config_table[1].target_table_entry.vlan_id = 0;
4808         }
4809
4810         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4811                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4812                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4813 }
4814
4815 /**
4816  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4817  *
4818  * @param bp driver descriptor
4819  * @param set set or clear an entry (1 or 0)
4820  * @param mac pointer to a buffer containing a MAC
4821  * @param cl_bit_vec bit vector of clients to register a MAC for
4822  * @param cam_offset offset in a CAM to use
4823  */
4824 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4825                                        u32 cl_bit_vec, u8 cam_offset)
4826 {
4827         struct mac_configuration_cmd_e1h *config =
4828                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4829
4830         config->hdr.length = 1;
4831         config->hdr.offset = cam_offset;
4832         config->hdr.client_id = 0xff;
4833         config->hdr.reserved1 = 0;
4834
4835         /* primary MAC */
4836         config->config_table[0].msb_mac_addr =
4837                                         swab16(*(u16 *)&mac[0]);
4838         config->config_table[0].middle_mac_addr =
4839                                         swab16(*(u16 *)&mac[2]);
4840         config->config_table[0].lsb_mac_addr =
4841                                         swab16(*(u16 *)&mac[4]);
4842         config->config_table[0].clients_bit_vector =
4843                                         cpu_to_le32(cl_bit_vec);
4844         config->config_table[0].vlan_id = 0;
4845         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4846         if (set)
4847                 config->config_table[0].flags = BP_PORT(bp);
4848         else
4849                 config->config_table[0].flags =
4850                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4851
4852         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4853            (set ? "setting" : "clearing"),
4854            config->config_table[0].msb_mac_addr,
4855            config->config_table[0].middle_mac_addr,
4856            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4857
4858         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4859                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4860                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4861 }
4862
4863 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4864                              int *state_p, int poll)
4865 {
4866         /* can take a while if any port is running */
4867         int cnt = 5000;
4868
4869         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4870            poll ? "polling" : "waiting", state, idx);
4871
4872         might_sleep();
4873         while (cnt--) {
4874                 if (poll) {
4875                         bnx2x_rx_int(bp->fp, 10);
4876                         /* if the index is different from 0
4877                          * the reply for some commands will
4878                          * be on the non-default queue
4879                          */
4880                         if (idx)
4881                                 bnx2x_rx_int(&bp->fp[idx], 10);
4882                 }
4883
4884                 mb(); /* state is changed by bnx2x_sp_event() */
4885                 if (*state_p == state) {
4886 #ifdef BNX2X_STOP_ON_ERROR
4887                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4888 #endif
4889                         return 0;
4890                 }
4891
4892                 msleep(1);
4893
4894                 if (bp->panic)
4895                         return -EIO;
4896         }
4897
4898         /* timeout! */
4899         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4900                   poll ? "polling" : "waiting", state, idx);
4901 #ifdef BNX2X_STOP_ON_ERROR
4902         bnx2x_panic();
4903 #endif
4904
4905         return -EBUSY;
4906 }
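/* With cnt = 5000 iterations of msleep(1) the helper above polls for
 * roughly 5s. The usual pattern is: bump a pending counter or set a
 * state, post the ramrod with bnx2x_sp_post() and then wait here, as
 * in the MAC helpers below.
 */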
4907
4908 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4909 {
4910         bp->set_mac_pending++;
4911         smp_wmb();
4912
4913         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4914                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
4915
4916         /* Wait for a completion */
4917         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4918 }
4919
4920 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4921 {
4922         bp->set_mac_pending++;
4923         smp_wmb();
4924
4925         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4926                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4927                                   1);
4928
4929         /* Wait for a completion */
4930         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4931 }
4932
4933 #ifdef BCM_CNIC
4934 /**
4935  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4936  * MAC(s). This function will wait until the ramrod completion
4937  * returns.
4938  *
4939  * @param bp driver handle
4940  * @param set set or clear the CAM entry
4941  *
4942  * @return 0 on success, -ENODEV if the ramrod doesn't return.
4943  */
4944 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4945 {
4946         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4947
4948         bp->set_mac_pending++;
4949         smp_wmb();
4950
4951         /* Send a SET_MAC ramrod */
4952         if (CHIP_IS_E1(bp))
4953                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4954                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4955                                   1);
4956         else
4957                 /* CAM allocation for E1H
4958                  * unicasts: by func number
4959                  * multicast: 20+FUNC*20, 20 each
4960                  */
4961                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4962                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4963
4964         /* Wait for a completion when setting */
4965         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4966
4967         return 0;
4968 }
4969 #endif
4970
4971 int bnx2x_setup_leading(struct bnx2x *bp)
4972 {
4973         int rc;
4974
4975         /* reset IGU state */
4976         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4977
4978         /* SETUP ramrod */
4979         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4980
4981         /* Wait for completion */
4982         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4983
4984         return rc;
4985 }
4986
4987 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4988 {
4989         struct bnx2x_fastpath *fp = &bp->fp[index];
4990
4991         /* reset IGU state */
4992         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4993
4994         /* SETUP ramrod */
4995         fp->state = BNX2X_FP_STATE_OPENING;
4996         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4997                       fp->cl_id, 0);
4998
4999         /* Wait for completion */
5000         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
5001                                  &(fp->state), 0);
5002 }
5003
5004
5005 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5006 {
5007
5008         switch (bp->multi_mode) {
5009         case ETH_RSS_MODE_DISABLED:
5010                 bp->num_queues = 1;
5011                 break;
5012
5013         case ETH_RSS_MODE_REGULAR:
5014                 if (num_queues)
5015                         bp->num_queues = min_t(u32, num_queues,
5016                                                   BNX2X_MAX_QUEUES(bp));
5017                 else
5018                         bp->num_queues = min_t(u32, num_online_cpus(),
5019                                                   BNX2X_MAX_QUEUES(bp));
5020                 break;
5021
5022
5023         default:
5024                 bp->num_queues = 1;
5025                 break;
5026         }
5027 }
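/* E.g. with multi_mode=1 (ETH_RSS_MODE_REGULAR), the num_queues module
 * parameter left at 0 and 8 online CPUs, bp->num_queues becomes
 * min(8, BNX2X_MAX_QUEUES(bp)).
 */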
5028
5029
5030
5031 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5032 {
5033         struct bnx2x_fastpath *fp = &bp->fp[index];
5034         int rc;
5035
5036         /* halt the connection */
5037         fp->state = BNX2X_FP_STATE_HALTING;
5038         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5039
5040         /* Wait for completion */
5041         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5042                                &(fp->state), 1);
5043         if (rc) /* timeout */
5044                 return rc;
5045
5046         /* delete cfc entry */
5047         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5048
5049         /* Wait for completion */
5050         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5051                                &(fp->state), 1);
5052         return rc;
5053 }
5054
5055 static int bnx2x_stop_leading(struct bnx2x *bp)
5056 {
5057         __le16 dsb_sp_prod_idx;
5058         /* if the other port is handling traffic,
5059            this can take a lot of time */
5060         int cnt = 500;
5061         int rc;
5062
5063         might_sleep();
5064
5065         /* Send HALT ramrod */
5066         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5067         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5068
5069         /* Wait for completion */
5070         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5071                                &(bp->fp[0].state), 1);
5072         if (rc) /* timeout */
5073                 return rc;
5074
5075         dsb_sp_prod_idx = *bp->dsb_sp_prod;
5076
5077         /* Send PORT_DELETE ramrod */
5078         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5079
5080         /* Wait for the completion to arrive on the default status block;
5081            we are going to reset the chip anyway,
5082            so there is not much to do if this times out
5083          */
5084         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5085                 if (!cnt) {
5086                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5087                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5088                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
5089 #ifdef BNX2X_STOP_ON_ERROR
5090                         bnx2x_panic();
5091 #endif
5092                         rc = -EBUSY;
5093                         break;
5094                 }
5095                 cnt--;
5096                 msleep(1);
5097                 rmb(); /* Refresh the dsb_sp_prod */
5098         }
5099         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5100         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5101
5102         return rc;
5103 }
5104
5105 static void bnx2x_reset_func(struct bnx2x *bp)
5106 {
5107         int port = BP_PORT(bp);
5108         int func = BP_FUNC(bp);
5109         int base, i;
5110
5111         /* Configure IGU */
5112         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5113         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5114
5115 #ifdef BCM_CNIC
5116         /* Disable Timer scan */
5117         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5118         /*
5119         /*
5120          * Wait for at least 10ms and up to 2 seconds for the timers scan to
5121          * complete
5122          */
5122         for (i = 0; i < 200; i++) {
5123                 msleep(10);
5124                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5125                         break;
5126         }
5127 #endif
5128         /* Clear ILT */
5129         base = FUNC_ILT_BASE(func);
5130         for (i = base; i < base + ILT_PER_FUNC; i++)
5131                 bnx2x_ilt_wr(bp, i, 0);
5132 }
5133
5134 static void bnx2x_reset_port(struct bnx2x *bp)
5135 {
5136         int port = BP_PORT(bp);
5137         u32 val;
5138
5139         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5140
5141         /* Do not rcv packets to BRB */
5142         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5143         /* Do not direct rcv packets that are not for MCP to the BRB */
5144         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5145                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5146
5147         /* Configure AEU */
5148         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5149
5150         msleep(100);
5151         /* Check for BRB port occupancy */
5152         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5153         if (val)
5154                 DP(NETIF_MSG_IFDOWN,
5155                    "BRB1 is not empty  %d blocks are occupied\n", val);
5156
5157         /* TODO: Close Doorbell port? */
5158 }
5159
5160 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5161 {
5162         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5163            BP_FUNC(bp), reset_code);
5164
5165         switch (reset_code) {
5166         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5167                 bnx2x_reset_port(bp);
5168                 bnx2x_reset_func(bp);
5169                 bnx2x_reset_common(bp);
5170                 break;
5171
5172         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5173                 bnx2x_reset_port(bp);
5174                 bnx2x_reset_func(bp);
5175                 break;
5176
5177         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5178                 bnx2x_reset_func(bp);
5179                 break;
5180
5181         default:
5182                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5183                 break;
5184         }
5185 }
5186
5187 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5188 {
5189         int port = BP_PORT(bp);
5190         u32 reset_code = 0;
5191         int i, cnt, rc;
5192
5193         /* Wait until tx fastpath tasks complete */
5194         for_each_queue(bp, i) {
5195                 struct bnx2x_fastpath *fp = &bp->fp[i];
5196
5197                 cnt = 1000;
5198                 while (bnx2x_has_tx_work_unload(fp)) {
5199
5200                         bnx2x_tx_int(fp);
5201                         if (!cnt) {
5202                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
5203                                           i);
5204 #ifdef BNX2X_STOP_ON_ERROR
5205                                 bnx2x_panic();
5206                                 return;
5207 #else
5208                                 break;
5209 #endif
5210                         }
5211                         cnt--;
5212                         msleep(1);
5213                 }
5214         }
5215         /* Give HW time to discard old tx messages */
5216         msleep(1);
5217
5218         if (CHIP_IS_E1(bp)) {
5219                 struct mac_configuration_cmd *config =
5220                                                 bnx2x_sp(bp, mcast_config);
5221
5222                 bnx2x_set_eth_mac_addr_e1(bp, 0);
5223
5224                 for (i = 0; i < config->hdr.length; i++)
5225                         CAM_INVALIDATE(config->config_table[i]);
5226
5227                 config->hdr.length = i;
5228                 if (CHIP_REV_IS_SLOW(bp))
5229                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5230                 else
5231                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5232                 config->hdr.client_id = bp->fp->cl_id;
5233                 config->hdr.reserved1 = 0;
5234
5235                 bp->set_mac_pending++;
5236                 smp_wmb();
5237
5238                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5239                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5240                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5241
5242         } else { /* E1H */
5243                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5244
5245                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5246
5247                 for (i = 0; i < MC_HASH_SIZE; i++)
5248                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5249
5250                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5251         }
5252 #ifdef BCM_CNIC
5253         /* Clear iSCSI L2 MAC */
5254         mutex_lock(&bp->cnic_mutex);
5255         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5256                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5257                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5258         }
5259         mutex_unlock(&bp->cnic_mutex);
5260 #endif
5261
5262         if (unload_mode == UNLOAD_NORMAL)
5263                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5264
5265         else if (bp->flags & NO_WOL_FLAG)
5266                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5267
5268         else if (bp->wol) {
5269                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5270                 u8 *mac_addr = bp->dev->dev_addr;
5271                 u32 val;
5272                 /* The mac address is written to entries 1-4 to
5273                    preserve entry 0 which is used by the PMF */
5274                 u8 entry = (BP_E1HVN(bp) + 1)*8;
5275
5276                 val = (mac_addr[0] << 8) | mac_addr[1];
5277                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5278
5279                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5280                       (mac_addr[4] << 8) | mac_addr[5];
5281                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5282
5283                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5284
5285         } else
5286                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5287
5288         /* Close multi and leading connections
5289            Completions for ramrods are collected in a synchronous way */
5290         for_each_nondefault_queue(bp, i)
5291                 if (bnx2x_stop_multi(bp, i))
5292                         goto unload_error;
5293
5294         rc = bnx2x_stop_leading(bp);
5295         if (rc) {
5296                 BNX2X_ERR("Stop leading failed!\n");
5297 #ifdef BNX2X_STOP_ON_ERROR
5298                 return;
5299 #else
5300                 goto unload_error;
5301 #endif
5302         }
5303
5304 unload_error:
5305         if (!BP_NOMCP(bp))
5306                 reset_code = bnx2x_fw_command(bp, reset_code);
5307         else {
5308                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5309                    load_count[0], load_count[1], load_count[2]);
5310                 load_count[0]--;
5311                 load_count[1 + port]--;
5312                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5313                    load_count[0], load_count[1], load_count[2]);
5314                 if (load_count[0] == 0)
5315                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5316                 else if (load_count[1 + port] == 0)
5317                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5318                 else
5319                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5320         }
5321
5322         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5323             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5324                 bnx2x__link_reset(bp);
5325
5326         /* Reset the chip */
5327         bnx2x_reset_chip(bp, reset_code);
5328
5329         /* Report UNLOAD_DONE to MCP */
5330         if (!BP_NOMCP(bp))
5331                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5332
5333 }
5334
5335 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5336 {
5337         u32 val;
5338
5339         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5340
5341         if (CHIP_IS_E1(bp)) {
5342                 int port = BP_PORT(bp);
5343                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5344                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
5345
5346                 val = REG_RD(bp, addr);
5347                 val &= ~(0x300);
5348                 REG_WR(bp, addr, val);
5349         } else if (CHIP_IS_E1H(bp)) {
5350                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5351                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5352                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5353                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5354         }
5355 }
5356
5357
5358 /* Close gates #2, #3 and #4: */
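/* Gate #4 is the PXP doorbell discard, gate #2 the PXP internal write
 * discard and gate #3 bit 0 of the per-port HC config register; note
 * that #3 uses the inverted sense (the bit is set when opening).
 */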
5359 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5360 {
5361         u32 val, addr;
5362
5363         /* Gates #2 and #4a are closed/opened for "not E1" only */
5364         if (!CHIP_IS_E1(bp)) {
5365                 /* #4 */
5366                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5367                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5368                        close ? (val | 0x1) : (val & (~(u32)1)));
5369                 /* #2 */
5370                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5371                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5372                        close ? (val | 0x1) : (val & (~(u32)1)));
5373         }
5374
5375         /* #3 */
5376         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5377         val = REG_RD(bp, addr);
5378         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5379
5380         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5381                 close ? "closing" : "opening");
5382         mmiowb();
5383 }
5384
5385 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
5386
5387 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5388 {
5389         /* Do some magic... */
5390         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5391         *magic_val = val & SHARED_MF_CLP_MAGIC;
5392         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5393 }
5394
5395 /* Restore the value of the `magic' bit.
5396  *
5397  * @param bp Device handle.
5398  * @param magic_val Old value of the `magic' bit.
5399  */
5400 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5401 {
5402         /* Restore the `magic' bit value... */
5403         /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5404         SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5405                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5406         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5407         MF_CFG_WR(bp, shared_mf_config.clp_mb,
5408                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5409 }
5410
5411 /* Prepares for MCP reset: takes care of CLP configurations.
5412  *
5413  * @param bp
5414  * @param magic_val Old value of 'magic' bit.
5415  */
5416 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5417 {
5418         u32 shmem;
5419         u32 validity_offset;
5420
5421         DP(NETIF_MSG_HW, "Starting\n");
5422
5423         /* Set `magic' bit in order to save MF config */
5424         if (!CHIP_IS_E1(bp))
5425                 bnx2x_clp_reset_prep(bp, magic_val);
5426
5427         /* Get shmem offset */
5428         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5429         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5430
5431         /* Clear validity map flags */
5432         if (shmem > 0)
5433                 REG_WR(bp, shmem + validity_offset, 0);
5434 }
5435
5436 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5437 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
5438
5439 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5440  * depending on the HW type.
5441  *
5442  * @param bp
5443  */
5444 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5445 {
5446         /* special handling for emulation and FPGA,
5447            wait 10 times longer */
5448         if (CHIP_REV_IS_SLOW(bp))
5449                 msleep(MCP_ONE_TIMEOUT*10);
5450         else
5451                 msleep(MCP_ONE_TIMEOUT);
5452 }
5453
5454 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5455 {
5456         u32 shmem, cnt, validity_offset, val;
5457         int rc = 0;
5458
5459         msleep(100);
5460
5461         /* Get shmem offset */
5462         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5463         if (shmem == 0) {
5464                 BNX2X_ERR("Shmem 0 return failure\n");
5465                 rc = -ENOTTY;
5466                 goto exit_lbl;
5467         }
5468
5469         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5470
5471         /* Wait for MCP to come up */
5472         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5473                 /* TBD: it's best to check the validity map of the last
5474                  * port; currently this checks port 0.
5475                  */
5476                 val = REG_RD(bp, shmem + validity_offset);
5477                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5478                    shmem + validity_offset, val);
5479
5480                 /* check that shared memory is valid. */
5481                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5482                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5483                         break;
5484
5485                 bnx2x_mcp_wait_one(bp);
5486         }
5487
5488         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5489
5490         /* Check that shared memory is valid. This indicates that MCP is up. */
5491         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5492             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5493                 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5494                 rc = -ENOTTY;
5495                 goto exit_lbl;
5496         }
5497
5498 exit_lbl:
5499         /* Restore the `magic' bit value */
5500         if (!CHIP_IS_E1(bp))
5501                 bnx2x_clp_reset_done(bp, magic_val);
5502
5503         return rc;
5504 }
5505
5506 static void bnx2x_pxp_prep(struct bnx2x *bp)
5507 {
5508         if (!CHIP_IS_E1(bp)) {
5509                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5510                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5511                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5512                 mmiowb();
5513         }
5514 }
5515
5516 /*
5517  * Reset the whole chip except for:
5518  *      - PCIE core
5519  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5520  *              one reset bit)
5521  *      - IGU
5522  *      - MISC (including AEU)
5523  *      - GRC
5524  *      - RBCN, RBCP
5525  */
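/* Writing 1s to a _CLEAR register clears the corresponding reset-register
 * bits (putting those blocks into reset) and writing 1s to _SET sets them
 * again (taking the blocks out of reset), so the function below asserts
 * reset for everything outside the not_reset masks and then releases all
 * blocks.
 */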
5526 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5527 {
5528         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5529
5530         not_reset_mask1 =
5531                 MISC_REGISTERS_RESET_REG_1_RST_HC |
5532                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5533                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5534
5535         not_reset_mask2 =
5536                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5537                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5538                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5539                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5540                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5541                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5542                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5543                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5544
5545         reset_mask1 = 0xffffffff;
5546
5547         if (CHIP_IS_E1(bp))
5548                 reset_mask2 = 0xffff;
5549         else
5550                 reset_mask2 = 0x1ffff;
5551
5552         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5553                reset_mask1 & (~not_reset_mask1));
5554         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5555                reset_mask2 & (~not_reset_mask2));
5556
5557         barrier();
5558         mmiowb();
5559
5560         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5561         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5562         mmiowb();
5563 }
5564
5565 static int bnx2x_process_kill(struct bnx2x *bp)
5566 {
5567         int cnt = 1000;
5568         u32 val = 0;
5569         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5570
5571
5572         /* Empty the Tetris buffer, wait for 1s */
5573         do {
5574                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5575                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5576                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5577                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5578                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5579                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5580                     ((port_is_idle_0 & 0x1) == 0x1) &&
5581                     ((port_is_idle_1 & 0x1) == 0x1) &&
5582                     (pgl_exp_rom2 == 0xffffffff))
5583                         break;
5584                 msleep(1);
5585         } while (cnt-- > 0);
5586
5587         if (cnt <= 0) {
5588                 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there are"
5589                           " still outstanding read requests after 1s!\n");
5591                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5592                           " port_is_idle_0=0x%08x,"
5593                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5594                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5595                           pgl_exp_rom2);
5596                 return -EAGAIN;
5597         }
5598
5599         barrier();
5600
5601         /* Close gates #2, #3 and #4 */
5602         bnx2x_set_234_gates(bp, true);
5603
5604         /* TBD: Indicate that "process kill" is in progress to MCP */
5605
5606         /* Clear "unprepared" bit */
5607         REG_WR(bp, MISC_REG_UNPREPARED, 0);
5608         barrier();
5609
5610         /* Make sure all is written to the chip before the reset */
5611         mmiowb();
5612
5613         /* Wait for 1ms to empty GLUE and PCI-E core queues,
5614          * PSWHST, GRC and PSWRD Tetris buffer.
5615          */
5616         msleep(1);
5617
5618         /* Prepare for chip reset: */
5619         /* MCP */
5620         bnx2x_reset_mcp_prep(bp, &val);
5621
5622         /* PXP */
5623         bnx2x_pxp_prep(bp);
5624         barrier();
5625
5626         /* reset the chip */
5627         bnx2x_process_kill_chip_reset(bp);
5628         barrier();
5629
5630         /* Recover after reset: */
5631         /* MCP */
5632         if (bnx2x_reset_mcp_comp(bp, val))
5633                 return -EAGAIN;
5634
5635         /* PXP */
5636         bnx2x_pxp_prep(bp);
5637
5638         /* Open the gates #2, #3 and #4 */
5639         bnx2x_set_234_gates(bp, false);
5640
5641         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
5642          * reset state, re-enable attentions. */
5643
5644         return 0;
5645 }
5646
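/* The leader is the function holding HW_LOCK_RESOURCE_RESERVED_08: it
 * performs the chip reset on behalf of all functions, clears the global
 * "reset in progress" bit on success, and releases the leadership
 * either way.
 */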
5647 static int bnx2x_leader_reset(struct bnx2x *bp)
5648 {
5649         int rc = 0;
5650         /* Try to recover after the failure */
5651         if (bnx2x_process_kill(bp)) {
5652                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5653                        bp->dev->name);
5654                 rc = -EAGAIN;
5655                 goto exit_leader_reset;
5656         }
5657
5658         /* Clear "reset is in progress" bit and update the driver state */
5659         bnx2x_set_reset_done(bp);
5660         bp->recovery_state = BNX2X_RECOVERY_DONE;
5661
5662 exit_leader_reset:
5663         bp->is_leader = 0;
5664         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5665         smp_wmb();
5666         return rc;
5667 }
5668
5669 /* Assumption: runs under rtnl lock. This, together with the fact
5670  * that it's called only from bnx2x_reset_task(), ensures that it
5671  * will never be called when netif_running(bp->dev) is false.
5672  */
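/* Recovery state machine, roughly: INIT - stop the driver and
 * (possibly) become the leader; WAIT - the leader waits for all other
 * functions to unload and then resets the chip, while non-leaders wait
 * for the reset to complete; DONE - normal operation.
 */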
5673 static void bnx2x_parity_recover(struct bnx2x *bp)
5674 {
5675         DP(NETIF_MSG_HW, "Handling parity\n");
5676         while (1) {
5677                 switch (bp->recovery_state) {
5678                 case BNX2X_RECOVERY_INIT:
5679                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5680                         /* Try to get a LEADER_LOCK HW lock */
5681                         if (bnx2x_trylock_hw_lock(bp,
5682                                 HW_LOCK_RESOURCE_RESERVED_08))
5683                                 bp->is_leader = 1;
5684
5685                         /* Stop the driver */
5686                         /* If interface has been removed - break */
5687                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5688                                 return;
5689
5690                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
5691                         /* Ensure "is_leader" and "recovery_state"
5692                          *  update values are seen on other CPUs
5693                          */
5694                         smp_wmb();
5695                         break;
5696
5697                 case BNX2X_RECOVERY_WAIT:
5698                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5699                         if (bp->is_leader) {
5700                                 u32 load_counter = bnx2x_get_load_cnt(bp);
5701                                 if (load_counter) {
5702                                         /* Wait until all other functions
5703                                          * have gone down.
5704                                          */
5705                                         schedule_delayed_work(&bp->reset_task,
5706                                                                 HZ/10);
5707                                         return;
5708                                 } else {
5709                                         /* If all other functions have gone
5710                                          * down, try to bring the chip back
5711                                          * to normal. In any case it's an
5712                                          * exit point for a leader.
5713                                          */
5714                                         if (bnx2x_leader_reset(bp) ||
5715                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
5716                                                 printk(KERN_ERR"%s: Recovery "
5717                                                 "has failed. Power cycle is "
5718                                                 "needed.\n", bp->dev->name);
5719                                                 /* Disconnect this device */
5720                                                 netif_device_detach(bp->dev);
5721                                                 /* Block ifup for all function
5722                                                  * of this ASIC until
5723                                                  * "process kill" or power
5724                                                  * cycle.
5725                                                  */
5726                                                 bnx2x_set_reset_in_progress(bp);
5727                                                 /* Shut down the power */
5728                                                 bnx2x_set_power_state(bp,
5729                                                                 PCI_D3hot);
5730                                                 return;
5731                                         }
5732
5733                                         return;
5734                                 }
5735                         } else { /* non-leader */
5736                                 if (!bnx2x_reset_is_done(bp)) {
5737                                         /* Try to get the LEADER_LOCK HW lock,
5738                                          * since the former leader may have
5739                                          * been unloaded by the user or
5740                                          * released leadership for some
5741                                          * other reason.
5742                                          */
5743                                         if (bnx2x_trylock_hw_lock(bp,
5744                                             HW_LOCK_RESOURCE_RESERVED_08)) {
5745                                                 /* I'm a leader now! Restart a
5746                                                  * switch case.
5747                                                  */
5748                                                 bp->is_leader = 1;
5749                                                 break;
5750                                         }
5751
5752                                         schedule_delayed_work(&bp->reset_task,
5753                                                                 HZ/10);
5754                                         return;
5755
5756                                 } else { /* A leader has completed
5757                                           * the "process kill". It's an exit
5758                                           * point for a non-leader.
5759                                           */
5760                                         bnx2x_nic_load(bp, LOAD_NORMAL);
5761                                         bp->recovery_state =
5762                                                 BNX2X_RECOVERY_DONE;
5763                                         smp_wmb();
5764                                         return;
5765                                 }
5766                         }
5767                 default:
5768                         return;
5769                 }
5770         }
5771 }
5772
5773 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
5774  * scheduled on a generic workqueue in order to prevent a deadlock.
5775  */
5776 static void bnx2x_reset_task(struct work_struct *work)
5777 {
5778         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5779
5780 #ifdef BNX2X_STOP_ON_ERROR
5781         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
5782                   " so the reset is not done to allow a debug dump;\n"
5783          KERN_ERR " you will need to reboot when done\n");
5784         return;
5785 #endif
5786
5787         rtnl_lock();
5788
5789         if (!netif_running(bp->dev))
5790                 goto reset_task_exit;
5791
5792         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5793                 bnx2x_parity_recover(bp);
5794         else {
5795                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5796                 bnx2x_nic_load(bp, LOAD_NORMAL);
5797         }
5798
5799 reset_task_exit:
5800         rtnl_unlock();
5801 }
5802
5803 /* end of nic load/unload */
5804
5805 /*
5806  * Init service functions
5807  */
5808
5809 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5810 {
5811         switch (func) {
5812         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5813         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5814         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5815         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5816         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5817         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5818         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5819         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5820         default:
5821                 BNX2X_ERR("Unsupported function index: %d\n", func);
5822                 return (u32)(-1);
5823         }
5824 }
5825
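/* The PGL "pretend" register makes GRC accesses from this PCI function
 * appear to come from another function.  Below we pretend to be
 * function 0 (presumably where a leftover pre-boot driver programmed
 * the HC), disable interrupts in that context, and then restore the
 * original function id.
 */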
5826 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5827 {
5828         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5829
5830         /* Flush all outstanding writes */
5831         mmiowb();
5832
5833         /* Pretend to be function 0 */
5834         REG_WR(bp, reg, 0);
5835         /* Flush the GRC transaction (in the chip) */
5836         new_val = REG_RD(bp, reg);
5837         if (new_val != 0) {
5838                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5839                           new_val);
5840                 BUG();
5841         }
5842
5843         /* From now we are in the "like-E1" mode */
5844         bnx2x_int_disable(bp);
5845
5846         /* Flush all outstanding writes */
5847         mmiowb();
5848
5849         /* Restore the original function settings */
5850         REG_WR(bp, reg, orig_func);
5851         new_val = REG_RD(bp, reg);
5852         if (new_val != orig_func) {
5853                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5854                           orig_func, new_val);
5855                 BUG();
5856         }
5857 }
5858
5859 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5860 {
5861         if (CHIP_IS_E1H(bp))
5862                 bnx2x_undi_int_disable_e1h(bp, func);
5863         else
5864                 bnx2x_int_disable(bp);
5865 }
5866
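/* UNDI is the pre-boot (PXE) driver.  If it left the chip "unprepared"
 * and its tell-tale doorbell CID offset (0x7) is found, take over
 * gracefully: run the MCP unload handshake for both ports, disable the
 * interrupts it configured, block input traffic, and reset the device
 * while preserving the NIG port-swap strap settings.
 */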
5867 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5868 {
5869         u32 val;
5870
5871         /* Check if there is any driver already loaded */
5872         val = REG_RD(bp, MISC_REG_UNPREPARED);
5873         if (val == 0x1) {
5874                 /* Check if it is the UNDI driver: the UNDI driver
5875                  * initializes the CID offset for the normal doorbell to 0x7
5876                  */
5877                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5878                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5879                 if (val == 0x7) {
5880                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5881                         /* save our func */
5882                         int func = BP_FUNC(bp);
5883                         u32 swap_en;
5884                         u32 swap_val;
5885
5886                         /* clear the UNDI indication */
5887                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5888
5889                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
5890
5891                         /* try to unload UNDI on port 0 */
5892                         bp->func = 0;
5893                         bp->fw_seq =
5894                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5895                                 DRV_MSG_SEQ_NUMBER_MASK);
5896                         reset_code = bnx2x_fw_command(bp, reset_code);
5897
5898                         /* if UNDI is loaded on the other port */
5899                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5900
5901                                 /* send "DONE" for previous unload */
5902                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5903
5904                                 /* unload UNDI on port 1 */
5905                                 bp->func = 1;
5906                                 bp->fw_seq =
5907                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5908                                         DRV_MSG_SEQ_NUMBER_MASK);
5909                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5910
5911                                 bnx2x_fw_command(bp, reset_code);
5912                         }
5913
5914                         /* now it's safe to release the lock */
5915                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5916
5917                         bnx2x_undi_int_disable(bp, func);
5918
5919                         /* close input traffic and wait for it to drain */
5920                         /* Do not receive packets into the BRB */
5921                         REG_WR(bp,
5922                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5923                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5924                         /* Do not direct received packets that are not for
5925                          * the MCP to the BRB */
5926                         REG_WR(bp,
5927                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5928                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5929                         /* clear AEU */
5930                         REG_WR(bp,
5931                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5932                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5933                         msleep(10);
5934
5935                         /* save NIG port swap info */
5936                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5937                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5938                         /* reset device */
5939                         REG_WR(bp,
5940                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5941                                0xd3ffffff);
5942                         REG_WR(bp,
5943                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5944                                0x1403);
5945                         /* take the NIG out of reset and restore swap values */
5946                         REG_WR(bp,
5947                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5948                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
5949                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5950                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5951
5952                         /* send unload done to the MCP */
5953                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5954
5955                         /* restore our func and fw_seq */
5956                         bp->func = func;
5957                         bp->fw_seq =
5958                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5959                                 DRV_MSG_SEQ_NUMBER_MASK);
5960
5961                 } else
5962                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5963         }
5964 }
5965
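/* Read the board-wide configuration: chip id, flash size, shared memory
 * bases, bootcode version, WoL capability and the part number.  If the
 * shared memory base looks bogus the MCP is assumed dead and
 * NO_MCP_FLAG is set.
 */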
5966 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5967 {
5968         u32 val, val2, val3, val4, id;
5969         u16 pmc;
5970
5971         /* Get the chip revision id and number. */
5972         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5973         val = REG_RD(bp, MISC_REG_CHIP_NUM);
5974         id = ((val & 0xffff) << 16);
5975         val = REG_RD(bp, MISC_REG_CHIP_REV);
5976         id |= ((val & 0xf) << 12);
5977         val = REG_RD(bp, MISC_REG_CHIP_METAL);
5978         id |= ((val & 0xff) << 4);
5979         val = REG_RD(bp, MISC_REG_BOND_ID);
5980         id |= (val & 0xf);
5981         bp->common.chip_id = id;
5982         bp->link_params.chip_id = bp->common.chip_id;
5983         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5984
5985         val = (REG_RD(bp, 0x2874) & 0x55);
5986         if ((bp->common.chip_id & 0x1) ||
5987             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5988                 bp->flags |= ONE_PORT_FLAG;
5989                 BNX2X_DEV_INFO("single port device\n");
5990         }
5991
5992         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5993         bp->common.flash_size = (NVRAM_1MB_SIZE <<
5994                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
5995         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5996                        bp->common.flash_size, bp->common.flash_size);
5997
5998         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5999         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
6000         bp->link_params.shmem_base = bp->common.shmem_base;
6001         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
6002                        bp->common.shmem_base, bp->common.shmem2_base);
6003
6004         if (!bp->common.shmem_base ||
6005             (bp->common.shmem_base < 0xA0000) ||
6006             (bp->common.shmem_base >= 0xC0000)) {
6007                 BNX2X_DEV_INFO("MCP not active\n");
6008                 bp->flags |= NO_MCP_FLAG;
6009                 return;
6010         }
6011
6012         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6013         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6014                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6015                 BNX2X_ERROR("BAD MCP validity signature\n");
6016
6017         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6018         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6019
6020         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6021                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6022                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6023
6024         bp->link_params.feature_config_flags = 0;
6025         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6026         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6027                 bp->link_params.feature_config_flags |=
6028                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6029         else
6030                 bp->link_params.feature_config_flags &=
6031                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6032
6033         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6034         bp->common.bc_ver = val;
6035         BNX2X_DEV_INFO("bc_ver %X\n", val);
6036         if (val < BNX2X_BC_VER) {
6037                 /* for now only warn;
6038                  * later we might need to enforce this */
6039                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6040                             "please upgrade BC\n", BNX2X_BC_VER, val);
6041         }
6042         bp->link_params.feature_config_flags |=
6043                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6044                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6045
6046         if (BP_E1HVN(bp) == 0) {
6047                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6048                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6049         } else {
6050                 /* no WOL capability for E1HVN != 0 */
6051                 bp->flags |= NO_WOL_FLAG;
6052         }
6053         BNX2X_DEV_INFO("%sWoL capable\n",
6054                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
6055
6056         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6057         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6058         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6059         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6060
6061         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6062                  val, val2, val3, val4);
6063 }
6064
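/* Build bp->port.supported from the switch configuration (1G SerDes vs
 * 10G XGXS) and the external PHY type found in NVRAM, then mask out
 * whatever the NVRAM speed_cap_mask does not allow.
 */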
6065 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6066                                                     u32 switch_cfg)
6067 {
6068         int port = BP_PORT(bp);
6069         u32 ext_phy_type;
6070
6071         switch (switch_cfg) {
6072         case SWITCH_CFG_1G:
6073                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6074
6075                 ext_phy_type =
6076                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6077                 switch (ext_phy_type) {
6078                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6079                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6080                                        ext_phy_type);
6081
6082                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6083                                                SUPPORTED_10baseT_Full |
6084                                                SUPPORTED_100baseT_Half |
6085                                                SUPPORTED_100baseT_Full |
6086                                                SUPPORTED_1000baseT_Full |
6087                                                SUPPORTED_2500baseX_Full |
6088                                                SUPPORTED_TP |
6089                                                SUPPORTED_FIBRE |
6090                                                SUPPORTED_Autoneg |
6091                                                SUPPORTED_Pause |
6092                                                SUPPORTED_Asym_Pause);
6093                         break;
6094
6095                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6096                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6097                                        ext_phy_type);
6098
6099                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6100                                                SUPPORTED_10baseT_Full |
6101                                                SUPPORTED_100baseT_Half |
6102                                                SUPPORTED_100baseT_Full |
6103                                                SUPPORTED_1000baseT_Full |
6104                                                SUPPORTED_TP |
6105                                                SUPPORTED_FIBRE |
6106                                                SUPPORTED_Autoneg |
6107                                                SUPPORTED_Pause |
6108                                                SUPPORTED_Asym_Pause);
6109                         break;
6110
6111                 default:
6112                         BNX2X_ERR("NVRAM config error. "
6113                                   "BAD SerDes ext_phy_config 0x%x\n",
6114                                   bp->link_params.ext_phy_config);
6115                         return;
6116                 }
6117
6118                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6119                                            port*0x10);
6120                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6121                 break;
6122
6123         case SWITCH_CFG_10G:
6124                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6125
6126                 ext_phy_type =
6127                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6128                 switch (ext_phy_type) {
6129                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6130                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6131                                        ext_phy_type);
6132
6133                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6134                                                SUPPORTED_10baseT_Full |
6135                                                SUPPORTED_100baseT_Half |
6136                                                SUPPORTED_100baseT_Full |
6137                                                SUPPORTED_1000baseT_Full |
6138                                                SUPPORTED_2500baseX_Full |
6139                                                SUPPORTED_10000baseT_Full |
6140                                                SUPPORTED_TP |
6141                                                SUPPORTED_FIBRE |
6142                                                SUPPORTED_Autoneg |
6143                                                SUPPORTED_Pause |
6144                                                SUPPORTED_Asym_Pause);
6145                         break;
6146
6147                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6148                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6149                                        ext_phy_type);
6150
6151                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6152                                                SUPPORTED_1000baseT_Full |
6153                                                SUPPORTED_FIBRE |
6154                                                SUPPORTED_Autoneg |
6155                                                SUPPORTED_Pause |
6156                                                SUPPORTED_Asym_Pause);
6157                         break;
6158
6159                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6160                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6161                                        ext_phy_type);
6162
6163                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6164                                                SUPPORTED_2500baseX_Full |
6165                                                SUPPORTED_1000baseT_Full |
6166                                                SUPPORTED_FIBRE |
6167                                                SUPPORTED_Autoneg |
6168                                                SUPPORTED_Pause |
6169                                                SUPPORTED_Asym_Pause);
6170                         break;
6171
6172                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6173                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6174                                        ext_phy_type);
6175
6176                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6177                                                SUPPORTED_FIBRE |
6178                                                SUPPORTED_Pause |
6179                                                SUPPORTED_Asym_Pause);
6180                         break;
6181
6182                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6183                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6184                                        ext_phy_type);
6185
6186                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6187                                                SUPPORTED_1000baseT_Full |
6188                                                SUPPORTED_FIBRE |
6189                                                SUPPORTED_Pause |
6190                                                SUPPORTED_Asym_Pause);
6191                         break;
6192
6193                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6194                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6195                                        ext_phy_type);
6196
6197                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6198                                                SUPPORTED_1000baseT_Full |
6199                                                SUPPORTED_Autoneg |
6200                                                SUPPORTED_FIBRE |
6201                                                SUPPORTED_Pause |
6202                                                SUPPORTED_Asym_Pause);
6203                         break;
6204
6205                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6206                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6207                                        ext_phy_type);
6208
6209                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6210                                                SUPPORTED_1000baseT_Full |
6211                                                SUPPORTED_Autoneg |
6212                                                SUPPORTED_FIBRE |
6213                                                SUPPORTED_Pause |
6214                                                SUPPORTED_Asym_Pause);
6215                         break;
6216
6217                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6218                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6219                                        ext_phy_type);
6220
6221                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6222                                                SUPPORTED_TP |
6223                                                SUPPORTED_Autoneg |
6224                                                SUPPORTED_Pause |
6225                                                SUPPORTED_Asym_Pause);
6226                         break;
6227
6228                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6229                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6230                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM848xx)\n",
6231                                        ext_phy_type);
6232
6233                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6234                                                SUPPORTED_10baseT_Full |
6235                                                SUPPORTED_100baseT_Half |
6236                                                SUPPORTED_100baseT_Full |
6237                                                SUPPORTED_1000baseT_Full |
6238                                                SUPPORTED_10000baseT_Full |
6239                                                SUPPORTED_TP |
6240                                                SUPPORTED_Autoneg |
6241                                                SUPPORTED_Pause |
6242                                                SUPPORTED_Asym_Pause);
6243                         break;
6244
6245                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6246                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6247                                   bp->link_params.ext_phy_config);
6248                         break;
6249
6250                 default:
6251                         BNX2X_ERR("NVRAM config error. "
6252                                   "BAD XGXS ext_phy_config 0x%x\n",
6253                                   bp->link_params.ext_phy_config);
6254                         return;
6255                 }
6256
6257                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6258                                            port*0x18);
6259                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6260
6261                 break;
6262
6263         default:
6264                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6265                           bp->port.link_config);
6266                 return;
6267         }
6268         bp->link_params.phy_addr = bp->port.phy_addr;
6269
6270         /* mask what we support according to speed_cap_mask */
6271         if (!(bp->link_params.speed_cap_mask &
6272                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6273                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6274
6275         if (!(bp->link_params.speed_cap_mask &
6276                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6277                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6278
6279         if (!(bp->link_params.speed_cap_mask &
6280                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6281                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6282
6283         if (!(bp->link_params.speed_cap_mask &
6284                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6285                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6286
6287         if (!(bp->link_params.speed_cap_mask &
6288                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6289                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6290                                         SUPPORTED_1000baseT_Full);
6291
6292         if (!(bp->link_params.speed_cap_mask &
6293                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6294                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6295
6296         if (!(bp->link_params.speed_cap_mask &
6297                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6298                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6299
6300         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
6301 }
6302
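/* Translate the NVRAM link_config into the requested line speed, duplex
 * and advertised modes.  A speed the port does not support is reported
 * as an NVRAM config error and left unset; an unknown speed value falls
 * back to autoneg.
 */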
6303 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6304 {
6305         bp->link_params.req_duplex = DUPLEX_FULL;
6306
6307         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6308         case PORT_FEATURE_LINK_SPEED_AUTO:
6309                 if (bp->port.supported & SUPPORTED_Autoneg) {
6310                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6311                         bp->port.advertising = bp->port.supported;
6312                 } else {
6313                         u32 ext_phy_type =
6314                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6315
6316                         if ((ext_phy_type ==
6317                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6318                             (ext_phy_type ==
6319                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6320                                 /* force 10G, no AN */
6321                                 bp->link_params.req_line_speed = SPEED_10000;
6322                                 bp->port.advertising =
6323                                                 (ADVERTISED_10000baseT_Full |
6324                                                  ADVERTISED_FIBRE);
6325                                 break;
6326                         }
6327                         BNX2X_ERR("NVRAM config error. "
6328                                   "Invalid link_config 0x%x"
6329                                   "  Autoneg not supported\n",
6330                                   bp->port.link_config);
6331                         return;
6332                 }
6333                 break;
6334
6335         case PORT_FEATURE_LINK_SPEED_10M_FULL:
6336                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6337                         bp->link_params.req_line_speed = SPEED_10;
6338                         bp->port.advertising = (ADVERTISED_10baseT_Full |
6339                                                 ADVERTISED_TP);
6340                 } else {
6341                         BNX2X_ERROR("NVRAM config error. "
6342                                     "Invalid link_config 0x%x"
6343                                     "  speed_cap_mask 0x%x\n",
6344                                     bp->port.link_config,
6345                                     bp->link_params.speed_cap_mask);
6346                         return;
6347                 }
6348                 break;
6349
6350         case PORT_FEATURE_LINK_SPEED_10M_HALF:
6351                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6352                         bp->link_params.req_line_speed = SPEED_10;
6353                         bp->link_params.req_duplex = DUPLEX_HALF;
6354                         bp->port.advertising = (ADVERTISED_10baseT_Half |
6355                                                 ADVERTISED_TP);
6356                 } else {
6357                         BNX2X_ERROR("NVRAM config error. "
6358                                     "Invalid link_config 0x%x"
6359                                     "  speed_cap_mask 0x%x\n",
6360                                     bp->port.link_config,
6361                                     bp->link_params.speed_cap_mask);
6362                         return;
6363                 }
6364                 break;
6365
6366         case PORT_FEATURE_LINK_SPEED_100M_FULL:
6367                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6368                         bp->link_params.req_line_speed = SPEED_100;
6369                         bp->port.advertising = (ADVERTISED_100baseT_Full |
6370                                                 ADVERTISED_TP);
6371                 } else {
6372                         BNX2X_ERROR("NVRAM config error. "
6373                                     "Invalid link_config 0x%x"
6374                                     "  speed_cap_mask 0x%x\n",
6375                                     bp->port.link_config,
6376                                     bp->link_params.speed_cap_mask);
6377                         return;
6378                 }
6379                 break;
6380
6381         case PORT_FEATURE_LINK_SPEED_100M_HALF:
6382                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6383                         bp->link_params.req_line_speed = SPEED_100;
6384                         bp->link_params.req_duplex = DUPLEX_HALF;
6385                         bp->port.advertising = (ADVERTISED_100baseT_Half |
6386                                                 ADVERTISED_TP);
6387                 } else {
6388                         BNX2X_ERROR("NVRAM config error. "
6389                                     "Invalid link_config 0x%x"
6390                                     "  speed_cap_mask 0x%x\n",
6391                                     bp->port.link_config,
6392                                     bp->link_params.speed_cap_mask);
6393                         return;
6394                 }
6395                 break;
6396
6397         case PORT_FEATURE_LINK_SPEED_1G:
6398                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6399                         bp->link_params.req_line_speed = SPEED_1000;
6400                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
6401                                                 ADVERTISED_TP);
6402                 } else {
6403                         BNX2X_ERROR("NVRAM config error. "
6404                                     "Invalid link_config 0x%x"
6405                                     "  speed_cap_mask 0x%x\n",
6406                                     bp->port.link_config,
6407                                     bp->link_params.speed_cap_mask);
6408                         return;
6409                 }
6410                 break;
6411
6412         case PORT_FEATURE_LINK_SPEED_2_5G:
6413                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6414                         bp->link_params.req_line_speed = SPEED_2500;
6415                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
6416                                                 ADVERTISED_TP);
6417                 } else {
6418                         BNX2X_ERROR("NVRAM config error. "
6419                                     "Invalid link_config 0x%x"
6420                                     "  speed_cap_mask 0x%x\n",
6421                                     bp->port.link_config,
6422                                     bp->link_params.speed_cap_mask);
6423                         return;
6424                 }
6425                 break;
6426
6427         case PORT_FEATURE_LINK_SPEED_10G_CX4:
6428         case PORT_FEATURE_LINK_SPEED_10G_KX4:
6429         case PORT_FEATURE_LINK_SPEED_10G_KR:
6430                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6431                         bp->link_params.req_line_speed = SPEED_10000;
6432                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
6433                                                 ADVERTISED_FIBRE);
6434                 } else {
6435                         BNX2X_ERROR("NVRAM config error. "
6436                                     "Invalid link_config 0x%x"
6437                                     "  speed_cap_mask 0x%x\n",
6438                                     bp->port.link_config,
6439                                     bp->link_params.speed_cap_mask);
6440                         return;
6441                 }
6442                 break;
6443
6444         default:
6445                 BNX2X_ERROR("NVRAM config error. "
6446                             "BAD link speed link_config 0x%x\n",
6447                             bp->port.link_config);
6448                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6449                 bp->port.advertising = bp->port.supported;
6450                 break;
6451         }
6452
6453         bp->link_params.req_flow_ctrl = (bp->port.link_config &
6454                                          PORT_FEATURE_FLOW_CONTROL_MASK);
6455         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6456             !(bp->port.supported & SUPPORTED_Autoneg))
6457                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6458
6459         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
6460                        "  advertising 0x%x\n",
6461                        bp->link_params.req_line_speed,
6462                        bp->link_params.req_duplex,
6463                        bp->link_params.req_flow_ctrl, bp->port.advertising);
6464 }
6465
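/* Lay out the two shmem MAC words as a 6-byte, network-order MAC
 * address: mac_hi supplies bytes 0-1, mac_lo bytes 2-5.  For example
 * (hypothetical values), mac_hi = 0x0010 and mac_lo = 0x18010203 yield
 * 00:10:18:01:02:03.
 */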
6466 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6467 {
6468         mac_hi = cpu_to_be16(mac_hi);
6469         mac_lo = cpu_to_be32(mac_lo);
6470         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6471         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6472 }
6473
6474 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6475 {
6476         int port = BP_PORT(bp);
6477         u32 val, val2;
6478         u32 config;
6479         u16 i;
6480         u32 ext_phy_type;
6481
6482         bp->link_params.bp = bp;
6483         bp->link_params.port = port;
6484
6485         bp->link_params.lane_config =
6486                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6487         bp->link_params.ext_phy_config =
6488                 SHMEM_RD(bp,
6489                          dev_info.port_hw_config[port].external_phy_config);
6490         /* BCM8727_NOC => BCM8727 with no over-current protection */
6491         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6492             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6493                 bp->link_params.ext_phy_config &=
6494                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6495                 bp->link_params.ext_phy_config |=
6496                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6497                 bp->link_params.feature_config_flags |=
6498                         FEATURE_CONFIG_BCM8727_NOC;
6499         }
6500
6501         bp->link_params.speed_cap_mask =
6502                 SHMEM_RD(bp,
6503                          dev_info.port_hw_config[port].speed_capability_mask);
6504
6505         bp->port.link_config =
6506                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6507
6508         /* Get the 4 lanes xgxs config rx and tx */
6509         for (i = 0; i < 2; i++) {
6510                 val = SHMEM_RD(bp,
6511                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6512                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6513                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6514
6515                 val = SHMEM_RD(bp,
6516                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6517                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6518                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6519         }
6520
6521         /* If the device is capable of WoL, set the default state according
6522          * to the HW
6523          */
6524         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6525         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6526                    (config & PORT_FEATURE_WOL_ENABLED));
6527
6528         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
6529                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
6530                        bp->link_params.lane_config,
6531                        bp->link_params.ext_phy_config,
6532                        bp->link_params.speed_cap_mask, bp->port.link_config);
6533
6534         bp->link_params.switch_cfg |= (bp->port.link_config &
6535                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
6536         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6537
6538         bnx2x_link_settings_requested(bp);
6539
6540         /*
6541          * If connected directly, work with the internal PHY, otherwise, work
6542          * with the external PHY
6543          */
6544         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6545         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6546                 bp->mdio.prtad = bp->link_params.phy_addr;
6547
6548         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6549                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6550                 bp->mdio.prtad =
6551                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
6552
6553         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6554         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6555         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6556         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6557         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6558
6559 #ifdef BCM_CNIC
6560         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6561         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6562         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6563 #endif
6564 }
6565
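/* On E1H, a non-default e1hov_tag on function 0 indicates the board is
 * in multi-function mode; every function must then carry a valid outer
 * VLAN (E1HOV) tag of its own, and may override the port MAC address
 * with a per-function one from the MF configuration.
 */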
6566 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6567 {
6568         int func = BP_FUNC(bp);
6569         u32 val, val2;
6570         int rc = 0;
6571
6572         bnx2x_get_common_hwinfo(bp);
6573
6574         bp->e1hov = 0;
6575         bp->e1hmf = 0;
6576         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6577                 bp->mf_config =
6578                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6579
6580                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6581                        FUNC_MF_CFG_E1HOV_TAG_MASK);
6582                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6583                         bp->e1hmf = 1;
6584                 BNX2X_DEV_INFO("%s function mode\n",
6585                                IS_E1HMF(bp) ? "multi" : "single");
6586
6587                 if (IS_E1HMF(bp)) {
6588                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6589                                                                 e1hov_tag) &
6590                                FUNC_MF_CFG_E1HOV_TAG_MASK);
6591                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6592                                 bp->e1hov = val;
6593                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6594                                                "(0x%04x)\n",
6595                                                func, bp->e1hov, bp->e1hov);
6596                         } else {
6597                                 BNX2X_ERROR("No valid E1HOV for func %d,"
6598                                             "  aborting\n", func);
6599                                 rc = -EPERM;
6600                         }
6601                 } else {
6602                         if (BP_E1HVN(bp)) {
6603                                 BNX2X_ERROR("VN %d in single function mode,"
6604                                             "  aborting\n", BP_E1HVN(bp));
6605                                 rc = -EPERM;
6606                         }
6607                 }
6608         }
6609
6610         if (!BP_NOMCP(bp)) {
6611                 bnx2x_get_port_hwinfo(bp);
6612
6613                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6614                               DRV_MSG_SEQ_NUMBER_MASK);
6615                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6616         }
6617
6618         if (IS_E1HMF(bp)) {
6619                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6620                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
6621                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6622                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6623                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6624                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6625                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6626                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6627                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
6628                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
6629                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6630                                ETH_ALEN);
6631                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6632                                ETH_ALEN);
6633                 }
6634
6635                 return rc;
6636         }
6637
6638         if (BP_NOMCP(bp)) {
6639                 /* only supposed to happen on emulation/FPGA */
6640                 BNX2X_ERROR("warning: random MAC workaround active\n");
6641                 random_ether_addr(bp->dev->dev_addr);
6642                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6643         }
6644
6645         return rc;
6646 }
6647
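/* Best-effort PCI VPD parse: locate the read-only section and, if the
 * manufacturer-id keyword matches the Dell vendor id, copy the VENDOR0
 * keyword (seemingly an OEM firmware version string) into bp->fw_ver.
 * Malformed or absent VPD is silently ignored.
 */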
6648 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6649 {
6650         int cnt, i, block_end, rodi;
6651         char vpd_data[BNX2X_VPD_LEN+1];
6652         char str_id_reg[VENDOR_ID_LEN+1];
6653         char str_id_cap[VENDOR_ID_LEN+1];
6654         u8 len;
6655
6656         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6657         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6658
6659         if (cnt < BNX2X_VPD_LEN)
6660                 goto out_not_found;
6661
6662         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6663                              PCI_VPD_LRDT_RO_DATA);
6664         if (i < 0)
6665                 goto out_not_found;
6666
6667
6668         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6669                     pci_vpd_lrdt_size(&vpd_data[i]);
6670
6671         i += PCI_VPD_LRDT_TAG_SIZE;
6672
6673         if (block_end > BNX2X_VPD_LEN)
6674                 goto out_not_found;
6675
6676         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6677                                    PCI_VPD_RO_KEYWORD_MFR_ID);
6678         if (rodi < 0)
6679                 goto out_not_found;
6680
6681         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6682
6683         if (len != VENDOR_ID_LEN)
6684                 goto out_not_found;
6685
6686         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6687
6688         /* vendor specific info */
6689         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6690         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6691         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6692             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6693
6694                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6695                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
6696                 if (rodi >= 0) {
6697                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6698
6699                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6700
6701                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6702                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6703                                 bp->fw_ver[len] = ' ';
6704                         }
6705                 }
6706                 return;
6707         }
6708 out_not_found:
6709         return;
6710 }
6711
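/* One-time software initialization: locks and delayed work items,
 * hardware/port info, a possible UNDI takeover, module-parameter
 * derived settings (multi-queue, TPA, coalescing ticks) and the
 * periodic timer.
 */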
6712 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6713 {
6714         int func = BP_FUNC(bp);
6715         int timer_interval;
6716         int rc;
6717
6718         /* Disable interrupt handling until HW is initialized */
6719         atomic_set(&bp->intr_sem, 1);
6720         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6721
6722         mutex_init(&bp->port.phy_mutex);
6723         mutex_init(&bp->fw_mb_mutex);
6724         spin_lock_init(&bp->stats_lock);
6725 #ifdef BCM_CNIC
6726         mutex_init(&bp->cnic_mutex);
6727 #endif
6728
6729         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6730         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6731
6732         rc = bnx2x_get_hwinfo(bp);
6733
6734         bnx2x_read_fwinfo(bp);
6735         /* need to reset chip if undi was active */
6736         if (!BP_NOMCP(bp))
6737                 bnx2x_undi_unload(bp);
6738
6739         if (CHIP_REV_IS_FPGA(bp))
6740                 dev_err(&bp->pdev->dev, "FPGA detected\n");
6741
6742         if (BP_NOMCP(bp) && (func == 0))
6743                 dev_err(&bp->pdev->dev, "MCP disabled, "
6744                                         "must load devices in order!\n");
6745
6746         /* Set multi queue mode */
6747         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6748             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6749                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6750                                         "requested is not MSI-X\n");
6751                 multi_mode = ETH_RSS_MODE_DISABLED;
6752         }
6753         bp->multi_mode = multi_mode;
6754         bp->int_mode = int_mode;
6755
6756         bp->dev->features |= NETIF_F_GRO;
6757
6758         /* Set TPA flags */
6759         if (disable_tpa) {
6760                 bp->flags &= ~TPA_ENABLE_FLAG;
6761                 bp->dev->features &= ~NETIF_F_LRO;
6762         } else {
6763                 bp->flags |= TPA_ENABLE_FLAG;
6764                 bp->dev->features |= NETIF_F_LRO;
6765         }
6766         bp->disable_tpa = disable_tpa;
6767
6768         if (CHIP_IS_E1(bp))
6769                 bp->dropless_fc = 0;
6770         else
6771                 bp->dropless_fc = dropless_fc;
6772
6773         bp->mrrs = mrrs;
6774
6775         bp->tx_ring_size = MAX_TX_AVAIL;
6776         bp->rx_ring_size = MAX_RX_AVAIL;
6777
6778         bp->rx_csum = 1;
6779
6780         /* make sure that the numbers are in the right granularity */
6781         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6782         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6783
6784         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6785         bp->current_interval = (poll ? poll : timer_interval);
6786
6787         init_timer(&bp->timer);
6788         bp->timer.expires = jiffies + bp->current_interval;
6789         bp->timer.data = (unsigned long) bp;
6790         bp->timer.function = bnx2x_timer;
6791
6792         return rc;
6793 }
6794
6795
6796 /****************************************************************************
6797 * General service functions
6798 ****************************************************************************/
6799
6800 /* called with rtnl_lock */
6801 static int bnx2x_open(struct net_device *dev)
6802 {
6803         struct bnx2x *bp = netdev_priv(dev);
6804
6805         netif_carrier_off(dev);
6806
6807         bnx2x_set_power_state(bp, PCI_D0);
6808
6809         if (!bnx2x_reset_is_done(bp)) {
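                /* A previous parity-recovery ("process kill") flow is
                 * still marked in progress.  The do { } while (0) below
                 * is only a scoped block so the recovered path can bail
                 * out early with "break". */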
6810                 do {
6811                         /* Reset the MCP mailbox sequence if there is an
6812                          * ongoing recovery
6813                          */
6814                         bp->fw_seq = 0;
6815
6816                         /* If it's the first function to load and "reset
6817                          * done" is still not set, a previous recovery was
6818                          * left unfinished.  We don't check the attention
6819                          * state here (it may have already been cleared by
6820                          * a "common" reset); proceed with "process kill".
6821                          */
6822                         if ((bnx2x_get_load_cnt(bp) == 0) &&
6823                                 bnx2x_trylock_hw_lock(bp,
6824                                 HW_LOCK_RESOURCE_RESERVED_08) &&
6825                                 (!bnx2x_leader_reset(bp))) {
6826                                 DP(NETIF_MSG_HW, "Recovered in open\n");
6827                                 break;
6828                         }
6829
6830                         bnx2x_set_power_state(bp, PCI_D3hot);
6831
6832                         printk(KERN_ERR "%s: Recovery flow hasn't completed"
6833                                " yet. Try again later. If you still see this"
6834                                " message after a few retries, a power cycle"
6835                                " is required.\n", bp->dev->name);
6836
6837                         return -EAGAIN;
6838                 } while (0);
6839         }
6840
6841         bp->recovery_state = BNX2X_RECOVERY_DONE;
6842
6843         return bnx2x_nic_load(bp, LOAD_OPEN);
6844 }
6845
6846 /* called with rtnl_lock */
6847 static int bnx2x_close(struct net_device *dev)
6848 {
6849         struct bnx2x *bp = netdev_priv(dev);
6850
6851         /* Unload the driver, release IRQs */
6852         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6853         bnx2x_set_power_state(bp, PCI_D3hot);
6854
6855         return 0;
6856 }
6857
6858 /* called with netif_tx_lock from dev_mcast.c */
6859 void bnx2x_set_rx_mode(struct net_device *dev)
6860 {
6861         struct bnx2x *bp = netdev_priv(dev);
6862         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6863         int port = BP_PORT(bp);
6864
6865         if (bp->state != BNX2X_STATE_OPEN) {
6866                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6867                 return;
6868         }
6869
6870         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6871
6872         if (dev->flags & IFF_PROMISC)
6873                 rx_mode = BNX2X_RX_MODE_PROMISC;
6874
6875         else if ((dev->flags & IFF_ALLMULTI) ||
6876                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6877                   CHIP_IS_E1(bp)))
6878                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6879
6880         else { /* some multicasts */
6881                 if (CHIP_IS_E1(bp)) {
6882                         int i, old, offset;
6883                         struct netdev_hw_addr *ha;
6884                         struct mac_configuration_cmd *config =
6885                                                 bnx2x_sp(bp, mcast_config);
6886
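                        /* On E1 multicast filtering uses exact-match CAM
                         * entries: one per address, with stale entries from
                         * a previously longer list invalidated below. */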
6887                         i = 0;
6888                         netdev_for_each_mc_addr(ha, dev) {
6889                                 config->config_table[i].
6890                                         cam_entry.msb_mac_addr =
6891                                         swab16(*(u16 *)&ha->addr[0]);
6892                                 config->config_table[i].
6893                                         cam_entry.middle_mac_addr =
6894                                         swab16(*(u16 *)&ha->addr[2]);
6895                                 config->config_table[i].
6896                                         cam_entry.lsb_mac_addr =
6897                                         swab16(*(u16 *)&ha->addr[4]);
6898                                 config->config_table[i].cam_entry.flags =
6899                                                         cpu_to_le16(port);
6900                                 config->config_table[i].
6901                                         target_table_entry.flags = 0;
6902                                 config->config_table[i].target_table_entry.
6903                                         clients_bit_vector =
6904                                                 cpu_to_le32(1 << BP_L_ID(bp));
6905                                 config->config_table[i].
6906                                         target_table_entry.vlan_id = 0;
6907
6908                                 DP(NETIF_MSG_IFUP,
6909                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6910                                    config->config_table[i].
6911                                                 cam_entry.msb_mac_addr,
6912                                    config->config_table[i].
6913                                                 cam_entry.middle_mac_addr,
6914                                    config->config_table[i].
6915                                                 cam_entry.lsb_mac_addr);
6916                                 i++;
6917                         }
6918                         old = config->hdr.length;
6919                         if (old > i) {
6920                                 for (; i < old; i++) {
6921                                         if (CAM_IS_INVALID(config->
6922                                                            config_table[i])) {
6923                                                 /* already invalidated */
6924                                                 break;
6925                                         }
6926                                         /* invalidate */
6927                                         CAM_INVALIDATE(config->
6928                                                        config_table[i]);
6929                                 }
6930                         }
6931
6932                         if (CHIP_REV_IS_SLOW(bp))
6933                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6934                         else
6935                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
6936
6937                         config->hdr.length = i;
6938                         config->hdr.offset = offset;
6939                         config->hdr.client_id = bp->fp->cl_id;
6940                         config->hdr.reserved1 = 0;
6941
6942                         bp->set_mac_pending++;
6943                         smp_wmb();
6944
6945                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6946                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6947                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6948                                       0);
6949                 } else { /* E1H */
6950                         /* Accept one or more multicasts */
6951                         struct netdev_hw_addr *ha;
6952                         u32 mc_filter[MC_HASH_SIZE];
6953                         u32 crc, bit, regidx;
6954                         int i;
6955
6956                         memset(mc_filter, 0, sizeof(mc_filter));
6957
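                        /* 256-bit approximate filter: bits 31..24 of the
                         * little-endian CRC32C of the MAC select one of 256
                         * bucket bits, spread over MC_HASH_SIZE 32-bit
                         * registers (regidx = bit / 32, bit %= 32). */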
6958                         netdev_for_each_mc_addr(ha, dev) {
6959                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6960                                    ha->addr);
6961
6962                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6963                                 bit = (crc >> 24) & 0xff;
6964                                 regidx = bit >> 5;
6965                                 bit &= 0x1f;
6966                                 mc_filter[regidx] |= (1 << bit);
6967                         }
6968
6969                         for (i = 0; i < MC_HASH_SIZE; i++)
6970                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6971                                        mc_filter[i]);
6972                 }
6973         }
6974
6975         bp->rx_mode = rx_mode;
6976         bnx2x_set_storm_rx_mode(bp);
6977 }
6978
6979
6980 /* called with rtnl_lock */
6981 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6982                            int devad, u16 addr)
6983 {
6984         struct bnx2x *bp = netdev_priv(netdev);
6985         u16 value;
6986         int rc;
6987         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6988
6989         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6990            prtad, devad, addr);
6991
6992         if (prtad != bp->mdio.prtad) {
6993                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6994                    prtad, bp->mdio.prtad);
6995                 return -EINVAL;
6996         }
6997
6998         /* The HW expects different devad if CL22 is used */
6999         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7000
7001         bnx2x_acquire_phy_lock(bp);
7002         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
7003                              devad, addr, &value);
7004         bnx2x_release_phy_lock(bp);
7005         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7006
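        /* The mdio45 glue expects the register value on success and a
         * negative errno on failure, hence value is folded into rc. */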
7007         if (!rc)
7008                 rc = value;
7009         return rc;
7010 }
7011
7012 /* called with rtnl_lock */
7013 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7014                             u16 addr, u16 value)
7015 {
7016         struct bnx2x *bp = netdev_priv(netdev);
7017         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7018         int rc;
7019
7020         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7021                            " value 0x%x\n", prtad, devad, addr, value);
7022
7023         if (prtad != bp->mdio.prtad) {
7024                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7025                    prtad, bp->mdio.prtad);
7026                 return -EINVAL;
7027         }
7028
7029         /* The HW expects different devad if CL22 is used */
7030         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7031
7032         bnx2x_acquire_phy_lock(bp);
7033         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7034                               devad, addr, value);
7035         bnx2x_release_phy_lock(bp);
7036         return rc;
7037 }
7038
7039 /* called with rtnl_lock */
7040 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7041 {
7042         struct bnx2x *bp = netdev_priv(dev);
7043         struct mii_ioctl_data *mdio = if_mii(ifr);
7044
7045         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7046            mdio->phy_id, mdio->reg_num, mdio->val_in);
7047
7048         if (!netif_running(dev))
7049                 return -EAGAIN;
7050
7051         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7052 }
7053
7054 #ifdef CONFIG_NET_POLL_CONTROLLER
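/* netpoll entry point: with the device's IRQ line masked, invoke the
 * interrupt handler by hand so netconsole and friends can make progress
 * when normal interrupt delivery is unavailable. */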
7055 static void poll_bnx2x(struct net_device *dev)
7056 {
7057         struct bnx2x *bp = netdev_priv(dev);
7058
7059         disable_irq(bp->pdev->irq);
7060         bnx2x_interrupt(bp->pdev->irq, dev);
7061         enable_irq(bp->pdev->irq);
7062 }
7063 #endif
7064
7065 static const struct net_device_ops bnx2x_netdev_ops = {
7066         .ndo_open               = bnx2x_open,
7067         .ndo_stop               = bnx2x_close,
7068         .ndo_start_xmit         = bnx2x_start_xmit,
7069         .ndo_set_multicast_list = bnx2x_set_rx_mode,
7070         .ndo_set_mac_address    = bnx2x_change_mac_addr,
7071         .ndo_validate_addr      = eth_validate_addr,
7072         .ndo_do_ioctl           = bnx2x_ioctl,
7073         .ndo_change_mtu         = bnx2x_change_mtu,
7074         .ndo_tx_timeout         = bnx2x_tx_timeout,
7075 #ifdef BCM_VLAN
7076         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
7077 #endif
7078 #ifdef CONFIG_NET_POLL_CONTROLLER
7079         .ndo_poll_controller    = poll_bnx2x,
7080 #endif
7081 };
7082
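/*
 * bnx2x_init_dev - PCI-level bring-up for one device: enable it, map
 * BAR 0 (register space) and BAR 2 (doorbells), choose a 64-bit DMA
 * mask with a 32-bit fallback, and wire up the netdev ops, ethtool ops
 * and offload feature flags.
 */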
7083 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7084                                     struct net_device *dev)
7085 {
7086         struct bnx2x *bp;
7087         int rc;
7088
7089         SET_NETDEV_DEV(dev, &pdev->dev);
7090         bp = netdev_priv(dev);
7091
7092         bp->dev = dev;
7093         bp->pdev = pdev;
7094         bp->flags = 0;
7095         bp->func = PCI_FUNC(pdev->devfn);
7096
7097         rc = pci_enable_device(pdev);
7098         if (rc) {
7099                 dev_err(&bp->pdev->dev,
7100                         "Cannot enable PCI device, aborting\n");
7101                 goto err_out;
7102         }
7103
7104         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7105                 dev_err(&bp->pdev->dev,
7106                         "Cannot find PCI device base address, aborting\n");
7107                 rc = -ENODEV;
7108                 goto err_out_disable;
7109         }
7110
7111         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7112                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7113                        " base address, aborting\n");
7114                 rc = -ENODEV;
7115                 goto err_out_disable;
7116         }
7117
7118         if (atomic_read(&pdev->enable_cnt) == 1) {
7119                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7120                 if (rc) {
7121                         dev_err(&bp->pdev->dev,
7122                                 "Cannot obtain PCI resources, aborting\n");
7123                         goto err_out_disable;
7124                 }
7125
7126                 pci_set_master(pdev);
7127                 pci_save_state(pdev);
7128         }
7129
7130         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7131         if (bp->pm_cap == 0) {
7132                 dev_err(&bp->pdev->dev,
7133                         "Cannot find power management capability, aborting\n");
7134                 rc = -EIO;
7135                 goto err_out_release;
7136         }
7137
7138         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7139         if (bp->pcie_cap == 0) {
7140                 dev_err(&bp->pdev->dev,
7141                         "Cannot find PCI Express capability, aborting\n");
7142                 rc = -EIO;
7143                 goto err_out_release;
7144         }
7145
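        /* Prefer 64-bit DMA (USING_DAC_FLAG also enables NETIF_F_HIGHDMA
         * below); otherwise fall back to a 32-bit mask, and give up if
         * even that is unsupported. */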
7146         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7147                 bp->flags |= USING_DAC_FLAG;
7148                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7149                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7150                                " failed, aborting\n");
7151                         rc = -EIO;
7152                         goto err_out_release;
7153                 }
7154
7155         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7156                 dev_err(&bp->pdev->dev,
7157                         "System does not support DMA, aborting\n");
7158                 rc = -EIO;
7159                 goto err_out_release;
7160         }
7161
7162         dev->mem_start = pci_resource_start(pdev, 0);
7163         dev->base_addr = dev->mem_start;
7164         dev->mem_end = pci_resource_end(pdev, 0);
7165
7166         dev->irq = pdev->irq;
7167
7168         bp->regview = pci_ioremap_bar(pdev, 0);
7169         if (!bp->regview) {
7170                 dev_err(&bp->pdev->dev,
7171                         "Cannot map register space, aborting\n");
7172                 rc = -ENOMEM;
7173                 goto err_out_release;
7174         }
7175
7176         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7177                                         min_t(u64, BNX2X_DB_SIZE,
7178                                               pci_resource_len(pdev, 2)));
7179         if (!bp->doorbells) {
7180                 dev_err(&bp->pdev->dev,
7181                         "Cannot map doorbell space, aborting\n");
7182                 rc = -ENOMEM;
7183                 goto err_out_unmap;
7184         }
7185
7186         bnx2x_set_power_state(bp, PCI_D0);
7187
7188         /* clean indirect addresses */
7189         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7190                                PCICFG_VENDOR_ID_OFFSET);
7191         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7192         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7193         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7194         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7195
7196         /* Reset the load counter */
7197         bnx2x_clear_load_cnt(bp);
7198
7199         dev->watchdog_timeo = TX_TIMEOUT;
7200
7201         dev->netdev_ops = &bnx2x_netdev_ops;
7202         bnx2x_set_ethtool_ops(dev);
7203         dev->features |= NETIF_F_SG;
7204         dev->features |= NETIF_F_HW_CSUM;
7205         if (bp->flags & USING_DAC_FLAG)
7206                 dev->features |= NETIF_F_HIGHDMA;
7207         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7208         dev->features |= NETIF_F_TSO6;
7209 #ifdef BCM_VLAN
7210         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7211         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7212
7213         dev->vlan_features |= NETIF_F_SG;
7214         dev->vlan_features |= NETIF_F_HW_CSUM;
7215         if (bp->flags & USING_DAC_FLAG)
7216                 dev->vlan_features |= NETIF_F_HIGHDMA;
7217         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7218         dev->vlan_features |= NETIF_F_TSO6;
7219 #endif
7220
7221         /* get_port_hwinfo() will set prtad and mmds properly */
7222         bp->mdio.prtad = MDIO_PRTAD_NONE;
7223         bp->mdio.mmds = 0;
7224         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7225         bp->mdio.dev = dev;
7226         bp->mdio.mdio_read = bnx2x_mdio_read;
7227         bp->mdio.mdio_write = bnx2x_mdio_write;
7228
7229         return 0;
7230
7231 err_out_unmap:
7232         if (bp->regview) {
7233                 iounmap(bp->regview);
7234                 bp->regview = NULL;
7235         }
7236         if (bp->doorbells) {
7237                 iounmap(bp->doorbells);
7238                 bp->doorbells = NULL;
7239         }
7240
7241 err_out_release:
7242         if (atomic_read(&pdev->enable_cnt) == 1)
7243                 pci_release_regions(pdev);
7244
7245 err_out_disable:
7246         pci_disable_device(pdev);
7247         pci_set_drvdata(pdev, NULL);
7248
7249 err_out:
7250         return rc;
7251 }
7252
7253 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7254                                                  int *width, int *speed)
7255 {
7256         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7257
7258         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7259
7260         /* returned speed encoding: 1 = 2.5GHz, 2 = 5GHz */
7261         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7262 }
7263
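/*
 * Sanity-check the firmware image before trusting any of it: every
 * section's offset/length must stay inside the file, every init_ops
 * offset must stay inside the ops array, and the embedded version must
 * match the version this driver was built against.
 */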
7264 static int bnx2x_check_firmware(struct bnx2x *bp)
7265 {
7266         const struct firmware *firmware = bp->firmware;
7267         struct bnx2x_fw_file_hdr *fw_hdr;
7268         struct bnx2x_fw_file_section *sections;
7269         u32 offset, len, num_ops;
7270         u16 *ops_offsets;
7271         int i;
7272         const u8 *fw_ver;
7273
7274         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7275                 return -EINVAL;
7276
7277         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7278         sections = (struct bnx2x_fw_file_section *)fw_hdr;
7279
7280         /* Make sure none of the offsets and sizes make us read beyond
7281          * the end of the firmware data */
7282         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7283                 offset = be32_to_cpu(sections[i].offset);
7284                 len = be32_to_cpu(sections[i].len);
7285                 if (len > firmware->size || offset > firmware->size - len) {
7286                         dev_err(&bp->pdev->dev,
7287                                 "Section %d length is out of bounds\n", i);
7288                         return -EINVAL;
7289                 }
7290         }
7291
7292         /* Likewise for the init_ops offsets */
7293         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7294         ops_offsets = (u16 *)(firmware->data + offset);
7295         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7296
7297         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7298                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7299                         dev_err(&bp->pdev->dev,
7300                                 "Section offset %d is out of bounds\n", i);
7301                         return -EINVAL;
7302                 }
7303         }
7304
7305         /* Check FW version */
7306         offset = be32_to_cpu(fw_hdr->fw_version.offset);
7307         fw_ver = firmware->data + offset;
7308         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7309             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7310             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7311             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7312                 dev_err(&bp->pdev->dev,
7313                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7314                        fw_ver[0], fw_ver[1], fw_ver[2],
7315                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7316                        BCM_5710_FW_MINOR_VERSION,
7317                        BCM_5710_FW_REVISION_VERSION,
7318                        BCM_5710_FW_ENGINEERING_VERSION);
7319                 return -EINVAL;
7320         }
7321
7322         return 0;
7323 }
7324
7325 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7326 {
7327         const __be32 *source = (const __be32 *)_source;
7328         u32 *target = (u32 *)_target;
7329         u32 i;
7330
7331         for (i = 0; i < n/4; i++)
7332                 target[i] = be32_to_cpu(source[i]);
7333 }
7334
7335 /*
7336    Ops array is stored in the following format:
7337    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7338  */
7339 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7340 {
7341         const __be32 *source = (const __be32 *)_source;
7342         struct raw_op *target = (struct raw_op *)_target;
7343         u32 i, j, tmp;
7344
7345         for (i = 0, j = 0; i < n/8; i++, j += 2) {
7346                 tmp = be32_to_cpu(source[j]);
7347                 target[i].op = (tmp >> 24) & 0xff;
7348                 target[i].offset = tmp & 0xffffff;
7349                 target[i].raw_data = be32_to_cpu(source[j + 1]);
7350         }
7351 }
7352
7353 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7354 {
7355         const __be16 *source = (const __be16 *)_source;
7356         u16 *target = (u16 *)_target;
7357         u32 i;
7358
7359         for (i = 0; i < n/2; i++)
7360                 target[i] = be16_to_cpu(source[i]);
7361 }
7362
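/* Allocate bp->arr sized from the firmware header, convert the section
 * out of big-endian via "func", and jump to "lbl" on allocation
 * failure; "fw_hdr" and "bp" are picked up from the calling scope. */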
7363 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
7364 do {                                                                    \
7365         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
7366         bp->arr = kmalloc(len, GFP_KERNEL);                             \
7367         if (!bp->arr) {                                                 \
7368                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7369                 goto lbl;                                               \
7370         }                                                               \
7371         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
7372              (u8 *)bp->arr, len);                                       \
7373 } while (0)
7374
7375 int bnx2x_init_firmware(struct bnx2x *bp)
7376 {
7377         const char *fw_file_name;
7378         struct bnx2x_fw_file_hdr *fw_hdr;
7379         int rc;
7380
7381         if (CHIP_IS_E1(bp))
7382                 fw_file_name = FW_FILE_NAME_E1;
7383         else if (CHIP_IS_E1H(bp))
7384                 fw_file_name = FW_FILE_NAME_E1H;
7385         else {
7386                 BNX2X_ERR("Unsupported chip revision\n");
7387                 return -EINVAL;
7388         }
7389
7390         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7391
7392         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7393         if (rc) {
7394                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7395                 goto request_firmware_exit;
7396         }
7397
7398         rc = bnx2x_check_firmware(bp);
7399         if (rc) {
7400                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7401                 goto request_firmware_exit;
7402         }
7403
7404         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7405
7406         /* Initialize the pointers to the init arrays */
7407         /* Blob */
7408         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7409
7410         /* Opcodes */
7411         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7412
7413         /* Offsets */
7414         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7415                             be16_to_cpu_n);
7416
7417         /* STORMs firmware */
7418         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7419                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7420         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
7421                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7422         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7423                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7424         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
7425                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
7426         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7427                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7428         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
7429                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7430         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7431                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7432         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
7433                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
7434
7435         return 0;
7436
7437 init_offsets_alloc_err:
7438         kfree(bp->init_ops);
7439 init_ops_alloc_err:
7440         kfree(bp->init_data);
7441 request_firmware_exit:
7442         release_firmware(bp->firmware);
7443
7444         return rc;
7445 }
7446
7447
7448 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7449                                     const struct pci_device_id *ent)
7450 {
7451         struct net_device *dev = NULL;
7452         struct bnx2x *bp;
7453         int pcie_width, pcie_speed;
7454         int rc;
7455
7456         /* dev is zeroed by alloc_etherdev_mq() */
7457         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7458         if (!dev) {
7459                 dev_err(&pdev->dev, "Cannot allocate net device\n");
7460                 return -ENOMEM;
7461         }
7462
7463         bp = netdev_priv(dev);
7464         bp->msg_enable = debug;
7465
7466         pci_set_drvdata(pdev, dev);
7467
7468         rc = bnx2x_init_dev(pdev, dev);
7469         if (rc < 0) {
7470                 free_netdev(dev);
7471                 return rc;
7472         }
7473
7474         rc = bnx2x_init_bp(bp);
7475         if (rc)
7476                 goto init_one_exit;
7477
7478         rc = register_netdev(dev);
7479         if (rc) {
7480                 dev_err(&pdev->dev, "Cannot register net device\n");
7481                 goto init_one_exit;
7482         }
7483
7484         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7485         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7486                " IRQ %d, ", board_info[ent->driver_data].name,
7487                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7488                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7489                dev->base_addr, bp->pdev->irq);
7490         pr_cont("node addr %pM\n", dev->dev_addr);
7491
7492         return 0;
7493
7494 init_one_exit:
7495         if (bp->regview)
7496                 iounmap(bp->regview);
7497
7498         if (bp->doorbells)
7499                 iounmap(bp->doorbells);
7500
7501         free_netdev(dev);
7502
7503         if (atomic_read(&pdev->enable_cnt) == 1)
7504                 pci_release_regions(pdev);
7505
7506         pci_disable_device(pdev);
7507         pci_set_drvdata(pdev, NULL);
7508
7509         return rc;
7510 }
7511
7512 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7513 {
7514         struct net_device *dev = pci_get_drvdata(pdev);
7515         struct bnx2x *bp;
7516
7517         if (!dev) {
7518                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7519                 return;
7520         }
7521         bp = netdev_priv(dev);
7522
7523         unregister_netdev(dev);
7524
7525         /* Make sure RESET task is not scheduled before continuing */
7526         cancel_delayed_work_sync(&bp->reset_task);
7527
7528         if (bp->regview)
7529                 iounmap(bp->regview);
7530
7531         if (bp->doorbells)
7532                 iounmap(bp->doorbells);
7533
7534         free_netdev(dev);
7535
7536         if (atomic_read(&pdev->enable_cnt) == 1)
7537                 pci_release_regions(pdev);
7538
7539         pci_disable_device(pdev);
7540         pci_set_drvdata(pdev, NULL);
7541 }
7542
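/* Lightweight teardown used by the PCI error handler after a bus error:
 * stop the stack and timer, release IRQs, invalidate the E1 CAM shadow
 * and free all driver-owned buffers, leaving BNX2X_STATE_CLOSED. */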
7543 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7544 {
7545         int i;
7546
7547         bp->state = BNX2X_STATE_ERROR;
7548
7549         bp->rx_mode = BNX2X_RX_MODE_NONE;
7550
7551         bnx2x_netif_stop(bp, 0);
7552         netif_carrier_off(bp->dev);
7553
7554         del_timer_sync(&bp->timer);
7555         bp->stats_state = STATS_STATE_DISABLED;
7556         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7557
7558         /* Release IRQs */
7559         bnx2x_free_irq(bp, false);
7560
7561         if (CHIP_IS_E1(bp)) {
7562                 struct mac_configuration_cmd *config =
7563                                                 bnx2x_sp(bp, mcast_config);
7564
7565                 for (i = 0; i < config->hdr.length; i++)
7566                         CAM_INVALIDATE(config->config_table[i]);
7567         }
7568
7569         /* Free SKBs, SGEs, TPA pool and driver internals */
7570         bnx2x_free_skbs(bp);
7571         for_each_queue(bp, i)
7572                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7573         for_each_queue(bp, i)
7574                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7575         bnx2x_free_mem(bp);
7576
7577         bp->state = BNX2X_STATE_CLOSED;
7578
7579         return 0;
7580 }
7581
7582 static void bnx2x_eeh_recover(struct bnx2x *bp)
7583 {
7584         u32 val;
7585
7586         mutex_init(&bp->port.phy_mutex);
7587
7588         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7589         bp->link_params.shmem_base = bp->common.shmem_base;
7590         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7591
7592         if (!bp->common.shmem_base ||
7593             (bp->common.shmem_base < 0xA0000) ||
7594             (bp->common.shmem_base >= 0xC0000)) {
7595                 BNX2X_DEV_INFO("MCP not active\n");
7596                 bp->flags |= NO_MCP_FLAG;
7597                 return;
7598         }
7599
7600         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7601         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7602                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7603                 BNX2X_ERR("BAD MCP validity signature\n");
7604
7605         if (!BP_NOMCP(bp)) {
7606                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7607                               & DRV_MSG_SEQ_NUMBER_MASK);
7608                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7609         }
7610 }
7611
7612 /**
7613  * bnx2x_io_error_detected - called when PCI error is detected
7614  * @pdev: Pointer to PCI device
7615  * @state: The current pci connection state
7616  *
7617  * This function is called after a PCI bus error affecting
7618  * this device has been detected.
7619  */
7620 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7621                                                 pci_channel_state_t state)
7622 {
7623         struct net_device *dev = pci_get_drvdata(pdev);
7624         struct bnx2x *bp = netdev_priv(dev);
7625
7626         rtnl_lock();
7627
7628         netif_device_detach(dev);
7629
7630         if (state == pci_channel_io_perm_failure) {
7631                 rtnl_unlock();
7632                 return PCI_ERS_RESULT_DISCONNECT;
7633         }
7634
7635         if (netif_running(dev))
7636                 bnx2x_eeh_nic_unload(bp);
7637
7638         pci_disable_device(pdev);
7639
7640         rtnl_unlock();
7641
7642         /* Request a slot reset */
7643         return PCI_ERS_RESULT_NEED_RESET;
7644 }
7645
7646 /**
7647  * bnx2x_io_slot_reset - called after the PCI bus has been reset
7648  * @pdev: Pointer to PCI device
7649  *
7650  * Restart the card from scratch, as if from a cold-boot.
7651  */
7652 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7653 {
7654         struct net_device *dev = pci_get_drvdata(pdev);
7655         struct bnx2x *bp = netdev_priv(dev);
7656
7657         rtnl_lock();
7658
7659         if (pci_enable_device(pdev)) {
7660                 dev_err(&pdev->dev,
7661                         "Cannot re-enable PCI device after reset\n");
7662                 rtnl_unlock();
7663                 return PCI_ERS_RESULT_DISCONNECT;
7664         }
7665
7666         pci_set_master(pdev);
7667         pci_restore_state(pdev);
7668
7669         if (netif_running(dev))
7670                 bnx2x_set_power_state(bp, PCI_D0);
7671
7672         rtnl_unlock();
7673
7674         return PCI_ERS_RESULT_RECOVERED;
7675 }
7676
7677 /**
7678  * bnx2x_io_resume - called when traffic can start flowing again
7679  * @pdev: Pointer to PCI device
7680  *
7681  * This callback is called when the error recovery driver tells us that
7682  * it's OK to resume normal operation.
7683  */
7684 static void bnx2x_io_resume(struct pci_dev *pdev)
7685 {
7686         struct net_device *dev = pci_get_drvdata(pdev);
7687         struct bnx2x *bp = netdev_priv(dev);
7688
7689         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7690                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7691                 return;
7692         }
7693
7694         rtnl_lock();
7695
7696         bnx2x_eeh_recover(bp);
7697
7698         if (netif_running(dev))
7699                 bnx2x_nic_load(bp, LOAD_NORMAL);
7700
7701         netif_device_attach(dev);
7702
7703         rtnl_unlock();
7704 }
7705
7706 static struct pci_error_handlers bnx2x_err_handler = {
7707         .error_detected = bnx2x_io_error_detected,
7708         .slot_reset     = bnx2x_io_slot_reset,
7709         .resume         = bnx2x_io_resume,
7710 };
7711
7712 static struct pci_driver bnx2x_pci_driver = {
7713         .name        = DRV_MODULE_NAME,
7714         .id_table    = bnx2x_pci_tbl,
7715         .probe       = bnx2x_init_one,
7716         .remove      = __devexit_p(bnx2x_remove_one),
7717         .suspend     = bnx2x_suspend,
7718         .resume      = bnx2x_resume,
7719         .err_handler = &bnx2x_err_handler,
7720 };
7721
7722 static int __init bnx2x_init(void)
7723 {
7724         int ret;
7725
7726         pr_info("%s", version);
7727
7728         bnx2x_wq = create_singlethread_workqueue("bnx2x");
7729         if (bnx2x_wq == NULL) {
7730                 pr_err("Cannot create workqueue\n");
7731                 return -ENOMEM;
7732         }
7733
7734         ret = pci_register_driver(&bnx2x_pci_driver);
7735         if (ret) {
7736                 pr_err("Cannot register driver\n");
7737                 destroy_workqueue(bnx2x_wq);
7738         }
7739         return ret;
7740 }
7741
7742 static void __exit bnx2x_cleanup(void)
7743 {
7744         pci_unregister_driver(&bnx2x_pci_driver);
7745
7746         destroy_workqueue(bnx2x_wq);
7747 }
7748
7749 module_init(bnx2x_init);
7750 module_exit(bnx2x_cleanup);
7751
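/* Typical usage from the command line (values illustrative only):
 *
 *   modprobe bnx2x multi_mode=1 num_queues=4 disable_tpa=0
 *
 * The matching bnx2x-e1*.fw blobs must be reachable by
 * request_firmware(), typically under /lib/firmware/.
 */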
7752 #ifdef BCM_CNIC
7753
7754 /* count denotes the number of new completions we have seen */
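/* Takes spq_lock: each completion frees an SPQ slot, and pending kwqes
 * are drained from the bp->cnic_kwq staging ring into the SPQ until
 * max_kwqe_pending entries are in flight. */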
7755 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7756 {
7757         struct eth_spe *spe;
7758
7759 #ifdef BNX2X_STOP_ON_ERROR
7760         if (unlikely(bp->panic))
7761                 return;
7762 #endif
7763
7764         spin_lock_bh(&bp->spq_lock);
7765         bp->cnic_spq_pending -= count;
7766
7767         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7768              bp->cnic_spq_pending++) {
7769
7770                 if (!bp->cnic_kwq_pending)
7771                         break;
7772
7773                 spe = bnx2x_sp_get_next(bp);
7774                 *spe = *bp->cnic_kwq_cons;
7775
7776                 bp->cnic_kwq_pending--;
7777
7778                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7779                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7780
7781                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7782                         bp->cnic_kwq_cons = bp->cnic_kwq;
7783                 else
7784                         bp->cnic_kwq_cons++;
7785         }
7786         bnx2x_sp_prod_update(bp);
7787         spin_unlock_bh(&bp->spq_lock);
7788 }
7789
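/* CNIC submit path (cnic_eth_dev->drv_submit_kwqes_16): stage up to
 * MAX_SP_DESC_CNT 16-byte kwqes in the bp->cnic_kwq ring, kick a post
 * if the SPQ still has room, and return how many were accepted. */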
7790 static int bnx2x_cnic_sp_queue(struct net_device *dev,
7791                                struct kwqe_16 *kwqes[], u32 count)
7792 {
7793         struct bnx2x *bp = netdev_priv(dev);
7794         int i;
7795
7796 #ifdef BNX2X_STOP_ON_ERROR
7797         if (unlikely(bp->panic))
7798                 return -EIO;
7799 #endif
7800
7801         spin_lock_bh(&bp->spq_lock);
7802
7803         for (i = 0; i < count; i++) {
7804                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7805
7806                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7807                         break;
7808
7809                 *bp->cnic_kwq_prod = *spe;
7810
7811                 bp->cnic_kwq_pending++;
7812
7813                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7814                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
7815                    spe->data.mac_config_addr.hi,
7816                    spe->data.mac_config_addr.lo,
7817                    bp->cnic_kwq_pending);
7818
7819                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7820                         bp->cnic_kwq_prod = bp->cnic_kwq;
7821                 else
7822                         bp->cnic_kwq_prod++;
7823         }
7824
7825         spin_unlock_bh(&bp->spq_lock);
7826
7827         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7828                 bnx2x_cnic_sp_post(bp, 0);
7829
7830         return i;
7831 }
7832
7833 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7834 {
7835         struct cnic_ops *c_ops;
7836         int rc = 0;
7837
7838         mutex_lock(&bp->cnic_mutex);
7839         c_ops = bp->cnic_ops;
7840         if (c_ops)
7841                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7842         mutex_unlock(&bp->cnic_mutex);
7843
7844         return rc;
7845 }
7846
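/* Same as bnx2x_cnic_ctl_send() but safe in BH context: cnic_ops is
 * sampled under rcu_read_lock() instead of the sleeping cnic_mutex. */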
7847 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7848 {
7849         struct cnic_ops *c_ops;
7850         int rc = 0;
7851
7852         rcu_read_lock();
7853         c_ops = rcu_dereference(bp->cnic_ops);
7854         if (c_ops)
7855                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7856         rcu_read_unlock();
7857
7858         return rc;
7859 }
7860
7861 /*
7862  * for commands that have no data
7863  */
7864 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7865 {
7866         struct cnic_ctl_info ctl = {0};
7867
7868         ctl.cmd = cmd;
7869
7870         return bnx2x_cnic_ctl_send(bp, &ctl);
7871 }
7872
7873 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7874 {
7875         struct cnic_ctl_info ctl;
7876
7877         /* first we tell CNIC and only then we count this as a completion */
7878         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7879         ctl.data.comp.cid = cid;
7880
7881         bnx2x_cnic_ctl_send_bh(bp, &ctl);
7882         bnx2x_cnic_sp_post(bp, 1);
7883 }
7884
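/* Control channel from CNIC into the eth driver: context-table writes,
 * slow-path completion accounting, and per-client rx-mode start/stop. */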
7885 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7886 {
7887         struct bnx2x *bp = netdev_priv(dev);
7888         int rc = 0;
7889
7890         switch (ctl->cmd) {
7891         case DRV_CTL_CTXTBL_WR_CMD: {
7892                 u32 index = ctl->data.io.offset;
7893                 dma_addr_t addr = ctl->data.io.dma_addr;
7894
7895                 bnx2x_ilt_wr(bp, index, addr);
7896                 break;
7897         }
7898
7899         case DRV_CTL_COMPLETION_CMD: {
7900                 int count = ctl->data.comp.comp_count;
7901
7902                 bnx2x_cnic_sp_post(bp, count);
7903                 break;
7904         }
7905
7906         /* rtnl_lock is held.  */
7907         case DRV_CTL_START_L2_CMD: {
7908                 u32 cli = ctl->data.ring.client_id;
7909
7910                 bp->rx_mode_cl_mask |= (1 << cli);
7911                 bnx2x_set_storm_rx_mode(bp);
7912                 break;
7913         }
7914
7915         /* rtnl_lock is held.  */
7916         case DRV_CTL_STOP_L2_CMD: {
7917                 u32 cli = ctl->data.ring.client_id;
7918
7919                 bp->rx_mode_cl_mask &= ~(1 << cli);
7920                 bnx2x_set_storm_rx_mode(bp);
7921                 break;
7922         }
7923
7924         default:
7925                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7926                 rc = -EINVAL;
7927         }
7928
7929         return rc;
7930 }
7931
7932 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7933 {
7934         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7935
7936         if (bp->flags & USING_MSIX_FLAG) {
7937                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7938                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7939                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7940         } else {
7941                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7942                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7943         }
7944         cp->irq_arr[0].status_blk = bp->cnic_sb;
7945         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7946         cp->irq_arr[1].status_blk = bp->def_status_blk;
7947         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7948
7949         cp->num_irq = 2;
7950 }
7951
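/* cnic_eth_dev->drv_register_cnic: allocate the page-sized kwqe staging
 * ring, set up the CNIC status block and IRQ info, and publish "ops"
 * with rcu_assign_pointer() so BH-context readers see it safely. */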
7952 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7953                                void *data)
7954 {
7955         struct bnx2x *bp = netdev_priv(dev);
7956         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7957
7958         if (ops == NULL)
7959                 return -EINVAL;
7960
7961         if (atomic_read(&bp->intr_sem) != 0)
7962                 return -EBUSY;
7963
7964         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7965         if (!bp->cnic_kwq)
7966                 return -ENOMEM;
7967
7968         bp->cnic_kwq_cons = bp->cnic_kwq;
7969         bp->cnic_kwq_prod = bp->cnic_kwq;
7970         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7971
7972         bp->cnic_spq_pending = 0;
7973         bp->cnic_kwq_pending = 0;
7974
7975         bp->cnic_data = data;
7976
7977         cp->num_irq = 0;
7978         cp->drv_state = CNIC_DRV_STATE_REGD;
7979
7980         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7981
7982         bnx2x_setup_cnic_irq_info(bp);
7983         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7984         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7985         rcu_assign_pointer(bp->cnic_ops, ops);
7986
7987         return 0;
7988 }
7989
7990 static int bnx2x_unregister_cnic(struct net_device *dev)
7991 {
7992         struct bnx2x *bp = netdev_priv(dev);
7993         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7994
7995         mutex_lock(&bp->cnic_mutex);
7996         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7997                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7998                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7999         }
8000         cp->drv_state = 0;
8001         rcu_assign_pointer(bp->cnic_ops, NULL);
8002         mutex_unlock(&bp->cnic_mutex);
8003         synchronize_rcu();
8004         kfree(bp->cnic_kwq);
8005         bp->cnic_kwq = NULL;
8006
8007         return 0;
8008 }
8009
8010 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8011 {
8012         struct bnx2x *bp = netdev_priv(dev);
8013         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8014
8015         cp->drv_owner = THIS_MODULE;
8016         cp->chip_id = CHIP_ID(bp);
8017         cp->pdev = bp->pdev;
8018         cp->io_base = bp->regview;
8019         cp->io_base2 = bp->doorbells;
8020         cp->max_kwqe_pending = 8;
8021         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
8022         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8023         cp->ctx_tbl_len = CNIC_ILT_LINES;
8024         cp->starting_cid = BCM_CNIC_CID_START;
8025         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8026         cp->drv_ctl = bnx2x_drv_ctl;
8027         cp->drv_register_cnic = bnx2x_register_cnic;
8028         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8029
8030         return cp;
8031 }
8032 EXPORT_SYMBOL(bnx2x_cnic_probe);
8033
8034 #endif /* BCM_CNIC */
8035