1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
53
54 #define BNX2X_MAIN
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_cmn.h"
59
60
61 #include <linux/firmware.h>
62 #include "bnx2x_fw_file_hdr.h"
63 /* FW files */
64 #define FW_FILE_VERSION                                 \
65         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
66         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
67         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
68         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69 #define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"
71
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT              (5*HZ)
74
75 static char version[] __devinitdata =
76         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
85
86 static int multi_mode = 1;
87 module_param(multi_mode, int, 0);
88 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89                              "(0 Disable; 1 Enable (default))");
90
91 static int num_queues;
92 module_param(num_queues, int, 0);
93 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94                                 " (default is the number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
103                                 "(1 INT#x; 2 MSI)");
104
105 static int dropless_fc;
106 module_param(dropless_fc, int, 0);
107 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108
109 static int poll;
110 module_param(poll, int, 0);
111 MODULE_PARM_DESC(poll, " Use polling (for debug)");
112
113 static int mrrs = -1;
114 module_param(mrrs, int, 0);
115 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116
117 static int debug;
118 module_param(debug, int, 0);
119 MODULE_PARM_DESC(debug, " Default debug msglevel");
120
121 static struct workqueue_struct *bnx2x_wq;
122
123 enum bnx2x_board_type {
124         BCM57710 = 0,
125         BCM57711 = 1,
126         BCM57711E = 2,
127 };
128
129 /* indexed by board_type, above */
130 static struct {
131         char *name;
132 } board_info[] __devinitdata = {
133         { "Broadcom NetXtreme II BCM57710 XGb" },
134         { "Broadcom NetXtreme II BCM57711 XGb" },
135         { "Broadcom NetXtreme II BCM57711E XGb" }
136 };
137
138
139 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
143         { 0 }
144 };
145
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
151
152 /* used only at init
153  * locking is done by mcp
154  */
155 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156 {
157         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160                                PCICFG_VENDOR_ID_OFFSET);
161 }
162
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164 {
165         u32 val;
166
167         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170                                PCICFG_VENDOR_ID_OFFSET);
171
172         return val;
173 }
174
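/*
 * Illustrative sketch (not called anywhere in the driver): a
 * read-modify-write through the indirect config window, built from the
 * two helpers above.  'addr', 'mask' and 'bits' are hypothetical
 * placeholders, not real register values.
 */
static void __maybe_unused bnx2x_reg_rmw_ind(struct bnx2x *bp, u32 addr,
                                             u32 mask, u32 bits)
{
        u32 val = bnx2x_reg_rd_ind(bp, addr);

        val = (val & ~mask) | bits;
        bnx2x_reg_wr_ind(bp, addr, val);
}
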
175 const u32 dmae_reg_go_c[] = {
176         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180 };
181
182 /* copy command into DMAE command memory and set DMAE command go */
183 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
184 {
185         u32 cmd_offset;
186         int i;
187
188         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
189         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
190                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
191
192                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
193                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
194         }
195         REG_WR(bp, dmae_reg_go_c[idx], 1);
196 }
197
198 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
199                       u32 len32)
200 {
201         struct dmae_command dmae;
202         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
203         int cnt = 200;
204
205         if (!bp->dmae_ready) {
206                 u32 *data = bnx2x_sp(bp, wb_data[0]);
207
208                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
209                    "  using indirect\n", dst_addr, len32);
210                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
211                 return;
212         }
213
214         memset(&dmae, 0, sizeof(struct dmae_command));
215
216         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
219 #ifdef __BIG_ENDIAN
220                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
221 #else
222                        DMAE_CMD_ENDIANITY_DW_SWAP |
223 #endif
224                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226         dmae.src_addr_lo = U64_LO(dma_addr);
227         dmae.src_addr_hi = U64_HI(dma_addr);
228         dmae.dst_addr_lo = dst_addr >> 2;
229         dmae.dst_addr_hi = 0;
230         dmae.len = len32;
231         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233         dmae.comp_val = DMAE_COMP_VAL;
234
235         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
236            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
237                     "dst_addr [%x:%08x (%08x)]\n"
238            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
239            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
242         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
245
246         mutex_lock(&bp->dmae_mutex);
247
248         *wb_comp = 0;
249
250         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
251
252         udelay(5);
253
254         while (*wb_comp != DMAE_COMP_VAL) {
255                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
256
257                 if (!cnt) {
258                         BNX2X_ERR("DMAE timeout!\n");
259                         break;
260                 }
261                 cnt--;
262                 /* adjust delay for emulation/FPGA */
263                 if (CHIP_REV_IS_SLOW(bp))
264                         msleep(100);
265                 else
266                         udelay(5);
267         }
268
269         mutex_unlock(&bp->dmae_mutex);
270 }
271
272 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
273 {
274         struct dmae_command dmae;
275         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
276         int cnt = 200;
277
278         if (!bp->dmae_ready) {
279                 u32 *data = bnx2x_sp(bp, wb_data[0]);
280                 int i;
281
282                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
283                    "  using indirect\n", src_addr, len32);
284                 for (i = 0; i < len32; i++)
285                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
286                 return;
287         }
288
289         memset(&dmae, 0, sizeof(struct dmae_command));
290
291         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
292                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
294 #ifdef __BIG_ENDIAN
295                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
296 #else
297                        DMAE_CMD_ENDIANITY_DW_SWAP |
298 #endif
299                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
301         dmae.src_addr_lo = src_addr >> 2;
302         dmae.src_addr_hi = 0;
303         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
305         dmae.len = len32;
306         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308         dmae.comp_val = DMAE_COMP_VAL;
309
310         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
312                     "dst_addr [%x:%08x (%08x)]\n"
313            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
314            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
317
318         mutex_lock(&bp->dmae_mutex);
319
320         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
321         *wb_comp = 0;
322
323         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
324
325         udelay(5);
326
327         while (*wb_comp != DMAE_COMP_VAL) {
328
329                 if (!cnt) {
330                         BNX2X_ERR("DMAE timeout!\n");
331                         break;
332                 }
333                 cnt--;
334                 /* adjust delay for emulation/FPGA */
335                 if (CHIP_REV_IS_SLOW(bp))
336                         msleep(100);
337                 else
338                         udelay(5);
339         }
340         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
343
344         mutex_unlock(&bp->dmae_mutex);
345 }
346
347 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
348                                u32 addr, u32 len)
349 {
350         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
351         int offset = 0;
352
353         while (len > dmae_wr_max) {
354                 bnx2x_write_dmae(bp, phys_addr + offset,
355                                  addr + offset, dmae_wr_max);
356                 offset += dmae_wr_max * 4;
357                 len -= dmae_wr_max;
358         }
359
360         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
361 }
362
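/*
 * Worked example for the chunking above, assuming (for illustration
 * only) that DMAE_LEN32_WR_MAX(bp) is 0x400 dwords: a request of
 * len = 0x900 dwords goes out as 0x400 dwords at byte offset 0,
 * 0x400 dwords at byte offset 0x1000, and a final 0x100 dwords at
 * byte offset 0x2000.  Note that 'len' counts dwords while 'offset'
 * advances in bytes, hence the "* 4".
 */
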
363 /* used only for slowpath so not inlined */
364 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
365 {
366         u32 wb_write[2];
367
368         wb_write[0] = val_hi;
369         wb_write[1] = val_lo;
370         REG_WR_DMAE(bp, reg, wb_write, 2);
371 }
372
373 #ifdef USE_WB_RD
374 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
375 {
376         u32 wb_data[2];
377
378         REG_RD_DMAE(bp, reg, wb_data, 2);
379
380         return HILO_U64(wb_data[0], wb_data[1]);
381 }
382 #endif
383
384 static int bnx2x_mc_assert(struct bnx2x *bp)
385 {
386         char last_idx;
387         int i, rc = 0;
388         u32 row0, row1, row2, row3;
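        /*
         * Each assert-list entry is four consecutive dwords (16 bytes);
         * the scan stops at the first entry whose first dword still holds
         * COMMON_ASM_INVALID_ASSERT_OPCODE.  The same walk is repeated
         * for each of the four storm processors below.
         */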
389
390         /* XSTORM */
391         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
392                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
393         if (last_idx)
394                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
395
396         /* print the asserts */
397         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
398
399                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
400                               XSTORM_ASSERT_LIST_OFFSET(i));
401                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
402                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
403                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
405                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
407
408                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
409                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
410                                   " 0x%08x 0x%08x 0x%08x\n",
411                                   i, row3, row2, row1, row0);
412                         rc++;
413                 } else {
414                         break;
415                 }
416         }
417
418         /* TSTORM */
419         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
420                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
421         if (last_idx)
422                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
423
424         /* print the asserts */
425         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
426
427                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
428                               TSTORM_ASSERT_LIST_OFFSET(i));
429                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
430                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
431                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
433                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
435
436                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
437                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
438                                   " 0x%08x 0x%08x 0x%08x\n",
439                                   i, row3, row2, row1, row0);
440                         rc++;
441                 } else {
442                         break;
443                 }
444         }
445
446         /* CSTORM */
447         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
448                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
449         if (last_idx)
450                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
451
452         /* print the asserts */
453         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
454
455                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
456                               CSTORM_ASSERT_LIST_OFFSET(i));
457                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
458                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
459                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
461                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
463
464                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
465                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
466                                   " 0x%08x 0x%08x 0x%08x\n",
467                                   i, row3, row2, row1, row0);
468                         rc++;
469                 } else {
470                         break;
471                 }
472         }
473
474         /* USTORM */
475         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
476                            USTORM_ASSERT_LIST_INDEX_OFFSET);
477         if (last_idx)
478                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
479
480         /* print the asserts */
481         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
482
483                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
484                               USTORM_ASSERT_LIST_OFFSET(i));
485                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
486                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
487                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
488                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
489                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
490                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
491
492                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
493                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
494                                   " 0x%08x 0x%08x 0x%08x\n",
495                                   i, row3, row2, row1, row0);
496                         rc++;
497                 } else {
498                         break;
499                 }
500         }
501
502         return rc;
503 }
504
505 static void bnx2x_fw_dump(struct bnx2x *bp)
506 {
507         u32 addr;
508         u32 mark, offset;
509         __be32 data[9];
510         int word;
511
512         if (BP_NOMCP(bp)) {
513                 BNX2X_ERR("NO MCP - can not dump\n");
514                 return;
515         }
516
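        /*
         * The MCP scratchpad holds the firmware log as a circular buffer:
         * 'mark' is the current write position, so the text is printed in
         * two passes - from 'mark' to the end of the buffer, then from
         * the start of the buffer back up to 'mark'.
         */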
517         addr = bp->common.shmem_base - 0x0800 + 4;
518         mark = REG_RD(bp, addr);
519         mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
520         pr_err("begin fw dump (mark 0x%x)\n", mark);
521
522         pr_err("");
523         for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
524                 for (word = 0; word < 8; word++)
525                         data[word] = htonl(REG_RD(bp, offset + 4*word));
526                 data[8] = 0x0;
527                 pr_cont("%s", (char *)data);
528         }
529         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
530                 for (word = 0; word < 8; word++)
531                         data[word] = htonl(REG_RD(bp, offset + 4*word));
532                 data[8] = 0x0;
533                 pr_cont("%s", (char *)data);
534         }
535         pr_err("end of fw dump\n");
536 }
537
538 void bnx2x_panic_dump(struct bnx2x *bp)
539 {
540         int i;
541         u16 j, start, end;
542
543         bp->stats_state = STATS_STATE_DISABLED;
544         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
545
546         BNX2X_ERR("begin crash dump -----------------\n");
547
548         /* Indices */
549         /* Common */
550         BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
551                   "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
552                   "  spq_prod_idx(0x%x)\n",
553                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
554                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
555
556         /* Rx */
557         for_each_queue(bp, i) {
558                 struct bnx2x_fastpath *fp = &bp->fp[i];
559
560                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
561                           "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
562                           "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
563                           i, fp->rx_bd_prod, fp->rx_bd_cons,
564                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
565                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
567                           "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
568                           fp->rx_sge_prod, fp->last_max_sge,
569                           le16_to_cpu(fp->fp_u_idx),
570                           fp->status_blk->u_status_block.status_block_index);
571         }
572
573         /* Tx */
574         for_each_queue(bp, i) {
575                 struct bnx2x_fastpath *fp = &bp->fp[i];
576
577                 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
578                           "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
579                           "  *tx_cons_sb(0x%x)\n",
580                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582                 BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
583                           "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
584                           fp->status_blk->c_status_block.status_block_index,
585                           fp->tx_db.data.prod);
586         }
587
588         /* Rings */
589         /* Rx */
590         for_each_queue(bp, i) {
591                 struct bnx2x_fastpath *fp = &bp->fp[i];
592
593                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
594                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
595                 for (j = start; j != end; j = RX_BD(j + 1)) {
596                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
597                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
598
599                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
600                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
601                 }
602
603                 start = RX_SGE(fp->rx_sge_prod);
604                 end = RX_SGE(fp->last_max_sge);
605                 for (j = start; j != end; j = RX_SGE(j + 1)) {
606                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
607                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
608
609                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
610                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
611                 }
612
613                 start = RCQ_BD(fp->rx_comp_cons - 10);
614                 end = RCQ_BD(fp->rx_comp_cons + 503);
615                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
616                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
617
618                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
619                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
620                 }
621         }
622
623         /* Tx */
624         for_each_queue(bp, i) {
625                 struct bnx2x_fastpath *fp = &bp->fp[i];
626
627                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
628                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
629                 for (j = start; j != end; j = TX_BD(j + 1)) {
630                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
631
632                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
633                                   i, j, sw_bd->skb, sw_bd->first_bd);
634                 }
635
636                 start = TX_BD(fp->tx_bd_cons - 10);
637                 end = TX_BD(fp->tx_bd_cons + 254);
638                 for (j = start; j != end; j = TX_BD(j + 1)) {
639                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
640
641                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
642                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643                 }
644         }
645
646         bnx2x_fw_dump(bp);
647         bnx2x_mc_assert(bp);
648         BNX2X_ERR("end crash dump -----------------\n");
649 }
650
651 void bnx2x_int_enable(struct bnx2x *bp)
652 {
653         int port = BP_PORT(bp);
654         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
655         u32 val = REG_RD(bp, addr);
656         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
658
659         if (msix) {
660                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
661                          HC_CONFIG_0_REG_INT_LINE_EN_0);
662                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
664         } else if (msi) {
665                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
666                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
669         } else {
670                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
673                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
674
675                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676                    val, port, addr);
677
678                 REG_WR(bp, addr, val);
679
680                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
681         }
682
683         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
684            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
685
686         REG_WR(bp, addr, val);
687         /*
688          * Ensure that HC_CONFIG is written before leading/trailing edge config
689          */
690         mmiowb();
691         barrier();
692
693         if (CHIP_IS_E1H(bp)) {
694                 /* init leading/trailing edge */
695                 if (IS_E1HMF(bp)) {
696                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
697                         if (bp->port.pmf)
698                                 /* enable nig and gpio3 attention */
699                                 val |= 0x1100;
700                 } else
701                         val = 0xffff;
702
703                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
704                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
705         }
706
707         /* Make sure that interrupts are indeed enabled from here on */
708         mmiowb();
709 }
710
711 static void bnx2x_int_disable(struct bnx2x *bp)
712 {
713         int port = BP_PORT(bp);
714         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715         u32 val = REG_RD(bp, addr);
716
717         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
718                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
719                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
720                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
721
722         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
723            val, port, addr);
724
725         /* flush all outstanding writes */
726         mmiowb();
727
728         REG_WR(bp, addr, val);
729         if (REG_RD(bp, addr) != val)
730                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731 }
732
733 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
734 {
735         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
736         int i, offset;
737
738         /* disable interrupt handling */
739         atomic_inc(&bp->intr_sem);
740         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
741
742         if (disable_hw)
743                 /* prevent the HW from sending interrupts */
744                 bnx2x_int_disable(bp);
745
746         /* make sure all ISRs are done */
747         if (msix) {
748                 synchronize_irq(bp->msix_table[0].vector);
749                 offset = 1;
750 #ifdef BCM_CNIC
751                 offset++;
752 #endif
753                 for_each_queue(bp, i)
754                         synchronize_irq(bp->msix_table[i + offset].vector);
755         } else
756                 synchronize_irq(bp->pdev->irq);
757
758         /* make sure sp_task is not running */
759         cancel_delayed_work(&bp->sp_task);
760         flush_workqueue(bnx2x_wq);
761 }
762
763 /* fast path */
764
765 /*
766  * General service functions
767  */
768
769 /* Return true if the lock was successfully acquired */
770 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
771 {
772         u32 lock_status;
773         u32 resource_bit = (1 << resource);
774         int func = BP_FUNC(bp);
775         u32 hw_lock_control_reg;
776
777         DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
778
779         /* Validating that the resource is within range */
780         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
781                 DP(NETIF_MSG_HW,
782                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
784                 return false;
785         }
786
787         if (func <= 5)
788                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
789         else
790                 hw_lock_control_reg =
791                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
792
793         /* Try to acquire the lock */
794         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795         lock_status = REG_RD(bp, hw_lock_control_reg);
796         if (lock_status & resource_bit)
797                 return true;
798
799         DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
800         return false;
801 }
802
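/*
 * Illustrative sketch only (not part of the driver flow): the expected
 * trylock pattern.  A caller that gets 'true' back owns the resource
 * bit and must drop it with bnx2x_release_hw_lock() when done; the
 * GPIO resource id here is just an example.
 */
static void __maybe_unused bnx2x_trylock_usage_sketch(struct bnx2x *bp)
{
        if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
                /* ... touch the registers guarded by this lock ... */
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        }
}
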
803
804 #ifdef BCM_CNIC
805 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
806 #endif
807
808 void bnx2x_sp_event(struct bnx2x_fastpath *fp,
809                            union eth_rx_cqe *rr_cqe)
810 {
811         struct bnx2x *bp = fp->bp;
812         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
813         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
814
815         DP(BNX2X_MSG_SP,
816            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
817            fp->index, cid, command, bp->state,
818            rr_cqe->ramrod_cqe.ramrod_type);
819
820         bp->spq_left++;
821
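        /*
         * The case labels below are formed by OR-ing the ramrod command
         * id with the state the path is expected to be in, so a single
         * switch can match the command/state pair at once.
         */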
822         if (fp->index) {
823                 switch (command | fp->state) {
824                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825                                                 BNX2X_FP_STATE_OPENING):
826                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
827                            cid);
828                         fp->state = BNX2X_FP_STATE_OPEN;
829                         break;
830
831                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
833                            cid);
834                         fp->state = BNX2X_FP_STATE_HALTED;
835                         break;
836
837                 default:
838                         BNX2X_ERR("unexpected MC reply (%d)  "
839                                   "fp[%d] state is %x\n",
840                                   command, fp->index, fp->state);
841                         break;
842                 }
843                 mb(); /* force bnx2x_wait_ramrod() to see the change */
844                 return;
845         }
846
847         switch (command | bp->state) {
848         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850                 bp->state = BNX2X_STATE_OPEN;
851                 break;
852
853         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
854                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
855                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856                 fp->state = BNX2X_FP_STATE_HALTED;
857                 break;
858
859         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
860                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
861                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
862                 break;
863
864 #ifdef BCM_CNIC
865         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867                 bnx2x_cnic_cfc_comp(bp, cid);
868                 break;
869 #endif
870
871         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874                 bp->set_mac_pending--;
875                 smp_wmb();
876                 break;
877
878         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880                 bp->set_mac_pending--;
881                 smp_wmb();
882                 break;
883
884         default:
885                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
886                           command, bp->state);
887                 break;
888         }
889         mb(); /* force bnx2x_wait_ramrod() to see the change */
890 }
891
892 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
893 {
894         struct bnx2x *bp = netdev_priv(dev_instance);
895         u16 status = bnx2x_ack_int(bp);
896         u16 mask;
897         int i;
898
899         /* Return here if interrupt is shared and it's not for us */
900         if (unlikely(status == 0)) {
901                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
902                 return IRQ_NONE;
903         }
904         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
905
906         /* Return here if interrupt is disabled */
907         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
908                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
909                 return IRQ_HANDLED;
910         }
911
912 #ifdef BNX2X_STOP_ON_ERROR
913         if (unlikely(bp->panic))
914                 return IRQ_HANDLED;
915 #endif
916
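        /*
         * In the ack'ed status word, bit 0 belongs to the default
         * (slowpath) status block and is handled at the bottom, while
         * fastpath status block sb_id owns bit (sb_id + 1) - hence the
         * "0x2 << sb_id" masks below.
         */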
917         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
918                 struct bnx2x_fastpath *fp = &bp->fp[i];
919
920                 mask = 0x2 << fp->sb_id;
921                 if (status & mask) {
922                         /* Handle Rx and Tx according to SB id */
923                         prefetch(fp->rx_cons_sb);
924                         prefetch(&fp->status_blk->u_status_block.
925                                                 status_block_index);
926                         prefetch(fp->tx_cons_sb);
927                         prefetch(&fp->status_blk->c_status_block.
928                                                 status_block_index);
929                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
930                         status &= ~mask;
931                 }
932         }
933
934 #ifdef BCM_CNIC
935         mask = 0x2 << CNIC_SB_ID(bp);
936         if (status & (mask | 0x1)) {
937                 struct cnic_ops *c_ops = NULL;
938
939                 rcu_read_lock();
940                 c_ops = rcu_dereference(bp->cnic_ops);
941                 if (c_ops)
942                         c_ops->cnic_handler(bp->cnic_data, NULL);
943                 rcu_read_unlock();
944
945                 status &= ~mask;
946         }
947 #endif
948
949         if (unlikely(status & 0x1)) {
950                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
951
952                 status &= ~0x1;
953                 if (!status)
954                         return IRQ_HANDLED;
955         }
956
957         if (unlikely(status))
958                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
959                    status);
960
961         return IRQ_HANDLED;
962 }
963
964 /* end of fast path */
965
966
967 /* Link */
968
969 /*
970  * General service functions
971  */
972
973 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
974 {
975         u32 lock_status;
976         u32 resource_bit = (1 << resource);
977         int func = BP_FUNC(bp);
978         u32 hw_lock_control_reg;
979         int cnt;
980
981         /* Validating that the resource is within range */
982         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
983                 DP(NETIF_MSG_HW,
984                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
986                 return -EINVAL;
987         }
988
989         if (func <= 5) {
990                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
991         } else {
992                 hw_lock_control_reg =
993                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
994         }
995
996         /* Validating that the resource is not already taken */
997         lock_status = REG_RD(bp, hw_lock_control_reg);
998         if (lock_status & resource_bit) {
999                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1000                    lock_status, resource_bit);
1001                 return -EEXIST;
1002         }
1003
1004         /* Try for 5 seconds, polling every 5 ms */
1005         for (cnt = 0; cnt < 1000; cnt++) {
1006                 /* Try to acquire the lock */
1007                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008                 lock_status = REG_RD(bp, hw_lock_control_reg);
1009                 if (lock_status & resource_bit)
1010                         return 0;
1011
1012                 msleep(5);
1013         }
1014         DP(NETIF_MSG_HW, "Timeout\n");
1015         return -EAGAIN;
1016 }
1017
1018 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1019 {
1020         u32 lock_status;
1021         u32 resource_bit = (1 << resource);
1022         int func = BP_FUNC(bp);
1023         u32 hw_lock_control_reg;
1024
1025         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1026
1027         /* Validating that the resource is within range */
1028         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1029                 DP(NETIF_MSG_HW,
1030                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1032                 return -EINVAL;
1033         }
1034
1035         if (func <= 5) {
1036                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1037         } else {
1038                 hw_lock_control_reg =
1039                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1040         }
1041
1042         /* Validating that the resource is currently taken */
1043         lock_status = REG_RD(bp, hw_lock_control_reg);
1044         if (!(lock_status & resource_bit)) {
1045                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1046                    lock_status, resource_bit);
1047                 return -EFAULT;
1048         }
1049
1050         REG_WR(bp, hw_lock_control_reg, resource_bit);
1051         return 0;
1052 }
1053
1054
1055 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1056 {
1057         /* The GPIO should be swapped if swap register is set and active */
1058         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060         int gpio_shift = gpio_num +
1061                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062         u32 gpio_mask = (1 << gpio_shift);
1063         u32 gpio_reg;
1064         int value;
1065
1066         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1068                 return -EINVAL;
1069         }
1070
1071         /* read GPIO value */
1072         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1073
1074         /* get the requested pin value */
1075         if ((gpio_reg & gpio_mask) == gpio_mask)
1076                 value = 1;
1077         else
1078                 value = 0;
1079
1080         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1081
1082         return value;
1083 }
1084
1085 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1086 {
1087         /* The GPIO should be swapped if swap register is set and active */
1088         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1089                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1090         int gpio_shift = gpio_num +
1091                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1092         u32 gpio_mask = (1 << gpio_shift);
1093         u32 gpio_reg;
1094
1095         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1096                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1097                 return -EINVAL;
1098         }
1099
1100         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1101         /* read GPIO and mask except the float bits */
1102         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1103
1104         switch (mode) {
1105         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1106                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1107                    gpio_num, gpio_shift);
1108                 /* clear FLOAT and set CLR */
1109                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1110                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1111                 break;
1112
1113         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1114                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1115                    gpio_num, gpio_shift);
1116                 /* clear FLOAT and set SET */
1117                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1118                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1119                 break;
1120
1121         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1122                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1123                    gpio_num, gpio_shift);
1124                 /* set FLOAT */
1125                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1126                 break;
1127
1128         default:
1129                 break;
1130         }
1131
1132         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1133         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1134
1135         return 0;
1136 }
1137
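/*
 * Illustrative sketch only: drive one pin low and then float it again
 * using the helper above.  The pin (GPIO 1) and the 1 ms pulse width
 * are hypothetical example values.
 */
static void __maybe_unused bnx2x_gpio_pulse_sketch(struct bnx2x *bp, u8 port)
{
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
        msleep(1);
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
                       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
}
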
1138 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1139 {
1140         /* The GPIO should be swapped if swap register is set and active */
1141         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1142                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1143         int gpio_shift = gpio_num +
1144                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1145         u32 gpio_mask = (1 << gpio_shift);
1146         u32 gpio_reg;
1147
1148         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1149                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1150                 return -EINVAL;
1151         }
1152
1153         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1154         /* read GPIO int */
1155         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1156
1157         switch (mode) {
1158         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1159                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1160                                    "output low\n", gpio_num, gpio_shift);
1161                 /* clear SET and set CLR */
1162                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1163                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1164                 break;
1165
1166         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1167                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1168                                    "output high\n", gpio_num, gpio_shift);
1169                 /* clear CLR and set SET */
1170                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1171                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1172                 break;
1173
1174         default:
1175                 break;
1176         }
1177
1178         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1179         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1180
1181         return 0;
1182 }
1183
1184 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1185 {
1186         u32 spio_mask = (1 << spio_num);
1187         u32 spio_reg;
1188
1189         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1190             (spio_num > MISC_REGISTERS_SPIO_7)) {
1191                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1192                 return -EINVAL;
1193         }
1194
1195         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1196         /* read SPIO and mask except the float bits */
1197         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1198
1199         switch (mode) {
1200         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1201                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1202                 /* clear FLOAT and set CLR */
1203                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1204                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1205                 break;
1206
1207         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1208                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1209                 /* clear FLOAT and set SET */
1210                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1211                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1212                 break;
1213
1214         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1215                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1216                 /* set FLOAT */
1217                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1218                 break;
1219
1220         default:
1221                 break;
1222         }
1223
1224         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1225         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1226
1227         return 0;
1228 }
1229
1230 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1231 {
1232         switch (bp->link_vars.ieee_fc &
1233                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1234         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1235                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1236                                           ADVERTISED_Pause);
1237                 break;
1238
1239         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1240                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1241                                          ADVERTISED_Pause);
1242                 break;
1243
1244         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1245                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1246                 break;
1247
1248         default:
1249                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1250                                           ADVERTISED_Pause);
1251                 break;
1252         }
1253 }
1254
1255
1256 u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1257 {
1258         if (!BP_NOMCP(bp)) {
1259                 u8 rc;
1260
1261                 /* Initialize link parameters structure variables */
1262                 /* It is recommended to turn off RX FC for jumbo frames
1263                    for better performance */
1264                 if (bp->dev->mtu > 5000)
1265                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1266                 else
1267                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1268
1269                 bnx2x_acquire_phy_lock(bp);
1270
1271                 if (load_mode == LOAD_DIAG)
1272                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
1273
1274                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1275
1276                 bnx2x_release_phy_lock(bp);
1277
1278                 bnx2x_calc_fc_adv(bp);
1279
1280                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1281                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1282                         bnx2x_link_report(bp);
1283                 }
1284
1285                 return rc;
1286         }
1287         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
1288         return -EINVAL;
1289 }
1290
1291 void bnx2x_link_set(struct bnx2x *bp)
1292 {
1293         if (!BP_NOMCP(bp)) {
1294                 bnx2x_acquire_phy_lock(bp);
1295                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1296                 bnx2x_release_phy_lock(bp);
1297
1298                 bnx2x_calc_fc_adv(bp);
1299         } else
1300                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1301 }
1302
1303 static void bnx2x__link_reset(struct bnx2x *bp)
1304 {
1305         if (!BP_NOMCP(bp)) {
1306                 bnx2x_acquire_phy_lock(bp);
1307                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1308                 bnx2x_release_phy_lock(bp);
1309         } else
1310                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1311 }
1312
1313 u8 bnx2x_link_test(struct bnx2x *bp)
1314 {
1315         u8 rc = 0;
1316
1317         if (!BP_NOMCP(bp)) {
1318                 bnx2x_acquire_phy_lock(bp);
1319                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1320                 bnx2x_release_phy_lock(bp);
1321         } else
1322                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1323
1324         return rc;
1325 }
1326
1327 static void bnx2x_init_port_minmax(struct bnx2x *bp)
1328 {
1329         u32 r_param = bp->link_vars.line_speed / 8;
1330         u32 fair_periodic_timeout_usec;
1331         u32 t_fair;
1332
1333         memset(&(bp->cmng.rs_vars), 0,
1334                sizeof(struct rate_shaping_vars_per_port));
1335         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
1336
1337         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1338         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1339
1340         /* this is the threshold below which no timer arming will occur
1341            1.25 coefficient is for the threshold to be a little bigger
1342            than the real time, to compensate for timer in-accuracy */
1343         bp->cmng.rs_vars.rs_threshold =
1344                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1345
1346         /* resolution of fairness timer */
1347         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1348         /* for 10G it is 1000usec. for 1G it is 10000usec. */
1349         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
1350
1351         /* this is the threshold below which we won't arm the timer anymore */
1352         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
1353
1354         /* we multiply by 1e3/8 to get bytes/msec.
1355            We don't want the credits to pass a credit
1356            of the t_fair*FAIR_MEM (algorithm resolution) */
1357         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1358         /* since each tick is 4 usec */
1359         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
1360 }
1361
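/*
 * Worked example for the sizing above (illustrative figures, assuming
 * line_speed is in Mbps): at 10G, r_param = 10000 / 8 = 1250 bytes/usec,
 * so with the 100 usec rate-shaping period the arming threshold comes
 * out as 100 * 1250 * 5 / 4 = 156250 bytes, and the period itself is
 * programmed as 100 / 4 = 25 SDM ticks.
 */
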
1362 /* Calculates the sum of vn_min_rates.
1363    It's needed for further normalizing of the min_rates.
1364    Returns:
1365      sum of vn_min_rates.
1366        or
1367      0 - if all the min_rates are 0.
1368      In the later case fainess algorithm should be deactivated.
1369      If not all min_rates are zero then those that are zeroes will be set to 1.
1370  */
1371 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1372 {
1373         int all_zero = 1;
1374         int port = BP_PORT(bp);
1375         int vn;
1376
1377         bp->vn_weight_sum = 0;
1378         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1379                 int func = 2*vn + port;
1380                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1381                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1382                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1383
1384                 /* Skip hidden vns */
1385                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1386                         continue;
1387
1388                 /* If min rate is zero - set it to 1 */
1389                 if (!vn_min_rate)
1390                         vn_min_rate = DEF_MIN_RATE;
1391                 else
1392                         all_zero = 0;
1393
1394                 bp->vn_weight_sum += vn_min_rate;
1395         }
1396
1397         /* ... only if all min rates are zeros - disable fairness */
1398         if (all_zero) {
1399                 bp->cmng.flags.cmng_enables &=
1400                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1401                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1402                    "  fairness will be disabled\n");
1403         } else
1404                 bp->cmng.flags.cmng_enables |=
1405                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1406 }
1407
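/*
 * Worked example for the weighting above: min-BW settings of 20, 30 and
 * 50 on three visible vns become weights of 2000, 3000 and 5000, giving
 * vn_weight_sum = 10000.  A vn configured to 0 is counted as DEF_MIN_RATE
 * instead, and only if every vn is at 0 is fairness switched off.
 */
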
1408 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1409 {
1410         struct rate_shaping_vars_per_vn m_rs_vn;
1411         struct fairness_vars_per_vn m_fair_vn;
1412         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1413         u16 vn_min_rate, vn_max_rate;
1414         int i;
1415
1416         /* If function is hidden - set min and max to zeroes */
1417         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1418                 vn_min_rate = 0;
1419                 vn_max_rate = 0;
1420
1421         } else {
1422                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1423                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1424                 /* If min rate is zero - set it to 1 */
1425                 if (!vn_min_rate)
1426                         vn_min_rate = DEF_MIN_RATE;
1427                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1428                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1429         }
1430         DP(NETIF_MSG_IFUP,
1431            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1432            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1433
1434         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1435         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1436
1437         /* global vn counter - maximal Mbps for this vn */
1438         m_rs_vn.vn_counter.rate = vn_max_rate;
1439
1440         /* quota - number of bytes transmitted in this period */
1441         m_rs_vn.vn_counter.quota =
1442                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1443
1444         if (bp->vn_weight_sum) {
1445                 /* credit for each period of the fairness algorithm:
1446                    number of bytes in T_FAIR (the vns share the port rate).
1447                    vn_weight_sum should not be larger than 10000, thus
1448                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1449                    than zero */
1450                 m_fair_vn.vn_credit_delta =
1451                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1452                                                    (8 * bp->vn_weight_sum))),
1453                               (bp->cmng.fair_vars.fair_threshold * 2));
1454                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1455                    m_fair_vn.vn_credit_delta);
1456         }
1457
1458         /* Store it to internal memory */
1459         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1460                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1461                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1462                        ((u32 *)(&m_rs_vn))[i]);
1463
1464         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1465                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1466                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1467                        ((u32 *)(&m_fair_vn))[i]);
1468 }
1469
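/*
 * Editor's note: an illustrative sketch, not part of the driver. The quota
 * computed above follows from Mbit/s == bit/usec: rate[Mbps] * period[usec]
 * gives bits per period, and dividing by 8 gives bytes. The helper name is
 * hypothetical.
 */
static inline u32 example_quota_bytes(u32 rate_mbps, u32 period_usec)
{
        /* e.g. 10000 Mbps over a 25 usec period -> 31250 bytes */
        return (rate_mbps * period_usec) / 8;
}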
1470
1471 /* This function is called upon link interrupt */
1472 static void bnx2x_link_attn(struct bnx2x *bp)
1473 {
1474         u32 prev_link_status = bp->link_vars.link_status;
1475         /* Make sure that we are synced with the current statistics */
1476         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1477
1478         bnx2x_link_update(&bp->link_params, &bp->link_vars);
1479
1480         if (bp->link_vars.link_up) {
1481
1482                 /* dropless flow control */
1483                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1484                         int port = BP_PORT(bp);
1485                         u32 pause_enabled = 0;
1486
1487                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1488                                 pause_enabled = 1;
1489
1490                         REG_WR(bp, BAR_USTRORM_INTMEM +
1491                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1492                                pause_enabled);
1493                 }
1494
1495                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1496                         struct host_port_stats *pstats;
1497
1498                         pstats = bnx2x_sp(bp, port_stats);
1499                         /* reset old bmac stats */
1500                         memset(&(pstats->mac_stx[0]), 0,
1501                                sizeof(struct mac_stx));
1502                 }
1503                 if (bp->state == BNX2X_STATE_OPEN)
1504                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1505         }
1506
1507         /* indicate link status only if link status actually changed */
1508         if (prev_link_status != bp->link_vars.link_status)
1509                 bnx2x_link_report(bp);
1510
1511         if (IS_E1HMF(bp)) {
1512                 int port = BP_PORT(bp);
1513                 int func;
1514                 int vn;
1515
1516                 /* Set the attention towards other drivers on the same port */
1517                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1518                         if (vn == BP_E1HVN(bp))
1519                                 continue;
1520
1521                         func = ((vn << 1) | port);
1522                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1523                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1524                 }
1525
1526                 if (bp->link_vars.link_up) {
1527                         int i;
1528
1529                         /* Init rate shaping and fairness contexts */
1530                         bnx2x_init_port_minmax(bp);
1531
1532                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1533                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1534
1535                         /* Store it to internal memory */
1536                         for (i = 0;
1537                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
1538                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1539                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1540                                        ((u32 *)(&bp->cmng))[i]);
1541                 }
1542         }
1543 }
1544
1545 void bnx2x__link_status_update(struct bnx2x *bp)
1546 {
1547         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1548                 return;
1549
1550         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1551
1552         if (bp->link_vars.link_up)
1553                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1554         else
1555                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1556
1557         bnx2x_calc_vn_weight_sum(bp);
1558
1559         /* indicate link status */
1560         bnx2x_link_report(bp);
1561 }
1562
1563 static void bnx2x_pmf_update(struct bnx2x *bp)
1564 {
1565         int port = BP_PORT(bp);
1566         u32 val;
1567
1568         bp->port.pmf = 1;
1569         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1570
1571         /* enable nig attention */
1572         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1573         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1574         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1575
1576         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1577 }
1578
1579 /* end of Link */
1580
1581 /* slow path */
1582
1583 /*
1584  * General service functions
1585  */
1586
1587 /* send the MCP a request, block until there is a reply */
1588 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1589 {
1590         int func = BP_FUNC(bp);
1591         u32 seq = ++bp->fw_seq;
1592         u32 rc = 0;
1593         u32 cnt = 1;
1594         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1595
1596         mutex_lock(&bp->fw_mb_mutex);
1597         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1598         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1599
1600         do {
1601                 /* let the FW do its magic ... */
1602                 msleep(delay);
1603
1604                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1605
1606                 /* Give the FW up to 5 seconds (500*10ms) */
1607         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1608
1609         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1610            cnt*delay, rc, seq);
1611
1612         /* is this a reply to our command? */
1613         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1614                 rc &= FW_MSG_CODE_MASK;
1615         else {
1616                 /* FW BUG! */
1617                 BNX2X_ERR("FW failed to respond!\n");
1618                 bnx2x_fw_dump(bp);
1619                 rc = 0;
1620         }
1621         mutex_unlock(&bp->fw_mb_mutex);
1622
1623         return rc;
1624 }
1625
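/*
 * Editor's note: a hedged usage sketch, not part of the driver. It shows
 * the calling pattern for bnx2x_fw_command() above: the masked return code
 * is the FW reply, and 0 means the FW never answered within the timeout.
 * The function name is hypothetical.
 */
static void example_fw_command_usage(struct bnx2x *bp)
{
        u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);

        if (rc == 0)
                BNX2X_ERR("no reply from FW\n"); /* FW timed out */
        /* otherwise rc holds the FW reply code */
}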
1626 static void bnx2x_e1h_disable(struct bnx2x *bp)
1627 {
1628         int port = BP_PORT(bp);
1629
1630         netif_tx_disable(bp->dev);
1631
1632         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1633
1634         netif_carrier_off(bp->dev);
1635 }
1636
1637 static void bnx2x_e1h_enable(struct bnx2x *bp)
1638 {
1639         int port = BP_PORT(bp);
1640
1641         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1642
1643         /* Tx queues should only be re-enabled */
1644         netif_tx_wake_all_queues(bp->dev);
1645
1646         /*
1647          * Should not call netif_carrier_on since it will be called if the link
1648          * is up when checking for link state
1649          */
1650 }
1651
1652 static void bnx2x_update_min_max(struct bnx2x *bp)
1653 {
1654         int port = BP_PORT(bp);
1655         int vn, i;
1656
1657         /* Init rate shaping and fairness contexts */
1658         bnx2x_init_port_minmax(bp);
1659
1660         bnx2x_calc_vn_weight_sum(bp);
1661
1662         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1663                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1664
1665         if (bp->port.pmf) {
1666                 int func;
1667
1668                 /* Set the attention towards other drivers on the same port */
1669                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1670                         if (vn == BP_E1HVN(bp))
1671                                 continue;
1672
1673                         func = ((vn << 1) | port);
1674                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1675                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1676                 }
1677
1678                 /* Store it to internal memory */
1679                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1680                         REG_WR(bp, BAR_XSTRORM_INTMEM +
1681                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1682                                ((u32 *)(&bp->cmng))[i]);
1683         }
1684 }
1685
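/*
 * Editor's note: an illustrative sketch, not driver code, of the
 * word-by-word copy idiom used above to store a structure into the chip's
 * internal memory. The helper is hypothetical; the driver open-codes this
 * loop at each call site.
 */
static void example_copy_to_intmem(struct bnx2x *bp, u32 addr,
                                   const void *src, size_t size)
{
        size_t i;

        /* the structures written this way are multiples of 4 bytes */
        for (i = 0; i < size / 4; i++)
                REG_WR(bp, addr + i * 4, ((const u32 *)src)[i]);
}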
1686 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1687 {
1688         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1689
1690         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1691
1692                 /*
1693                  * This is the only place besides the function initialization
1694                  * where the bp->flags can change so it is done without any
1695                  * locks
1696                  */
1697                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1698                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1699                         bp->flags |= MF_FUNC_DIS;
1700
1701                         bnx2x_e1h_disable(bp);
1702                 } else {
1703                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1704                         bp->flags &= ~MF_FUNC_DIS;
1705
1706                         bnx2x_e1h_enable(bp);
1707                 }
1708                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1709         }
1710         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1711
1712                 bnx2x_update_min_max(bp);
1713                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1714         }
1715
1716         /* Report results to MCP */
1717         if (dcc_event)
1718                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1719         else
1720                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1721 }
1722
1723 /* must be called under the spq lock */
1724 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1725 {
1726         struct eth_spe *next_spe = bp->spq_prod_bd;
1727
1728         if (bp->spq_prod_bd == bp->spq_last_bd) {
1729                 bp->spq_prod_bd = bp->spq;
1730                 bp->spq_prod_idx = 0;
1731                 DP(NETIF_MSG_TIMER, "end of spq\n");
1732         } else {
1733                 bp->spq_prod_bd++;
1734                 bp->spq_prod_idx++;
1735         }
1736         return next_spe;
1737 }
1738
1739 /* must be called under the spq lock */
1740 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1741 {
1742         int func = BP_FUNC(bp);
1743
1744         /* Make sure that BD data is updated before writing the producer */
1745         wmb();
1746
1747         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1748                bp->spq_prod_idx);
1749         mmiowb();
1750 }
1751
1752 /* the slow path queue is odd since completions arrive on the fastpath ring */
1753 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1754                          u32 data_hi, u32 data_lo, int common)
1755 {
1756         struct eth_spe *spe;
1757
1758 #ifdef BNX2X_STOP_ON_ERROR
1759         if (unlikely(bp->panic))
1760                 return -EIO;
1761 #endif
1762
1763         spin_lock_bh(&bp->spq_lock);
1764
1765         if (!bp->spq_left) {
1766                 BNX2X_ERR("BUG! SPQ ring full!\n");
1767                 spin_unlock_bh(&bp->spq_lock);
1768                 bnx2x_panic();
1769                 return -EBUSY;
1770         }
1771
1772         spe = bnx2x_sp_get_next(bp);
1773
1774         /* CID needs the port number to be encoded in it */
1775         spe->hdr.conn_and_cmd_data =
1776                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1777                                     HW_CID(bp, cid));
1778         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1779         if (common)
1780                 spe->hdr.type |=
1781                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1782
1783         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1784         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1785
1786         bp->spq_left--;
1787
1788         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1789            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
1790            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1791            (u32)(U64_LO(bp->spq_mapping) +
1792            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1793            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1794
1795         bnx2x_sp_prod_update(bp);
1796         spin_unlock_bh(&bp->spq_lock);
1797         return 0;
1798 }
1799
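/*
 * Editor's note: an illustrative sketch, not driver code, of the ring
 * producer advance used by bnx2x_sp_get_next() above: the producer wraps
 * back to the ring base when it reaches the last BD.
 */
static inline u16 example_ring_prod_advance(u16 prod, u16 last)
{
        /* e.g. with last == 7, the producer goes 0..7 and then back to 0 */
        return (prod == last) ? 0 : prod + 1;
}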
1800 /* acquire split MCP access lock register */
1801 static int bnx2x_acquire_alr(struct bnx2x *bp)
1802 {
1803         u32 j, val;
1804         int rc = 0;
1805
1806         might_sleep();
1807         for (j = 0; j < 1000; j++) {
1808                 val = (1UL << 31);
1809                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1810                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1811                 if (val & (1L << 31))
1812                         break;
1813
1814                 msleep(5);
1815         }
1816         if (!(val & (1L << 31))) {
1817                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1818                 rc = -EBUSY;
1819         }
1820
1821         return rc;
1822 }
1823
1824 /* release split MCP access lock register */
1825 static void bnx2x_release_alr(struct bnx2x *bp)
1826 {
1827         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1828 }
1829
1830 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1831 {
1832         struct host_def_status_block *def_sb = bp->def_status_blk;
1833         u16 rc = 0;
1834
1835         barrier(); /* status block is written to by the chip */
1836         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1837                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1838                 rc |= 1;
1839         }
1840         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1841                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1842                 rc |= 2;
1843         }
1844         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1845                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1846                 rc |= 4;
1847         }
1848         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1849                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1850                 rc |= 8;
1851         }
1852         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1853                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1854                 rc |= 16;
1855         }
1856         return rc;
1857 }
1858
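/*
 * Editor's note: an illustrative decode, not driver code, of the bitmask
 * returned by bnx2x_update_dsb_idx() above; bnx2x_sp_task() tests these
 * bits to decide which slowpath events to handle. Enum names hypothetical.
 */
enum example_dsb_update_bits {
        EXAMPLE_DSB_ATTN   = 1,  /* attention bits index changed */
        EXAMPLE_DSB_CSTORM = 2,  /* CStorm index changed */
        EXAMPLE_DSB_USTORM = 4,  /* UStorm index changed */
        EXAMPLE_DSB_XSTORM = 8,  /* XStorm index changed */
        EXAMPLE_DSB_TSTORM = 16, /* TStorm index changed */
};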
1859 /*
1860  * slow path service functions
1861  */
1862
1863 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1864 {
1865         int port = BP_PORT(bp);
1866         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1867                        COMMAND_REG_ATTN_BITS_SET);
1868         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1869                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
1870         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1871                                        NIG_REG_MASK_INTERRUPT_PORT0;
1872         u32 aeu_mask;
1873         u32 nig_mask = 0;
1874
1875         if (bp->attn_state & asserted)
1876                 BNX2X_ERR("IGU ERROR\n");
1877
1878         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1879         aeu_mask = REG_RD(bp, aeu_addr);
1880
1881         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
1882            aeu_mask, asserted);
1883         aeu_mask &= ~(asserted & 0x3ff);
1884         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1885
1886         REG_WR(bp, aeu_addr, aeu_mask);
1887         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1888
1889         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1890         bp->attn_state |= asserted;
1891         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1892
1893         if (asserted & ATTN_HARD_WIRED_MASK) {
1894                 if (asserted & ATTN_NIG_FOR_FUNC) {
1895
1896                         bnx2x_acquire_phy_lock(bp);
1897
1898                         /* save nig interrupt mask */
1899                         nig_mask = REG_RD(bp, nig_int_mask_addr);
1900                         REG_WR(bp, nig_int_mask_addr, 0);
1901
1902                         bnx2x_link_attn(bp);
1903
1904                         /* handle unicore attn? */
1905                 }
1906                 if (asserted & ATTN_SW_TIMER_4_FUNC)
1907                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1908
1909                 if (asserted & GPIO_2_FUNC)
1910                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1911
1912                 if (asserted & GPIO_3_FUNC)
1913                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1914
1915                 if (asserted & GPIO_4_FUNC)
1916                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1917
1918                 if (port == 0) {
1919                         if (asserted & ATTN_GENERAL_ATTN_1) {
1920                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1921                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1922                         }
1923                         if (asserted & ATTN_GENERAL_ATTN_2) {
1924                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1925                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1926                         }
1927                         if (asserted & ATTN_GENERAL_ATTN_3) {
1928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1930                         }
1931                 } else {
1932                         if (asserted & ATTN_GENERAL_ATTN_4) {
1933                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1934                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1935                         }
1936                         if (asserted & ATTN_GENERAL_ATTN_5) {
1937                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1938                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1939                         }
1940                         if (asserted & ATTN_GENERAL_ATTN_6) {
1941                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1942                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1943                         }
1944                 }
1945
1946         } /* if hardwired */
1947
1948         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1949            asserted, hc_addr);
1950         REG_WR(bp, hc_addr, asserted);
1951
1952         /* now set back the mask */
1953         if (asserted & ATTN_NIG_FOR_FUNC) {
1954                 REG_WR(bp, nig_int_mask_addr, nig_mask);
1955                 bnx2x_release_phy_lock(bp);
1956         }
1957 }
1958
1959 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1960 {
1961         int port = BP_PORT(bp);
1962
1963         /* mark the failure */
1964         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1965         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1966         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1967                  bp->link_params.ext_phy_config);
1968
1969         /* log the failure */
1970         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1971                " the driver to shut down the card to prevent permanent"
1972                " damage.  Please contact OEM Support for assistance\n");
1973 }
1974
1975 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1976 {
1977         int port = BP_PORT(bp);
1978         int reg_offset;
1979         u32 val, swap_val, swap_override;
1980
1981         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1982                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1983
1984         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1985
1986                 val = REG_RD(bp, reg_offset);
1987                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1988                 REG_WR(bp, reg_offset, val);
1989
1990                 BNX2X_ERR("SPIO5 hw attention\n");
1991
1992                 /* Fan failure attention */
1993                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1994                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1995                         /* Low power mode is controlled by GPIO 2 */
1996                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1997                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1998                         /* The PHY reset is controlled by GPIO 1 */
1999                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2000                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2001                         break;
2002
2003                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2004                         /* The PHY reset is controlled by GPIO 1 */
2005                         /* fake the port number to cancel the swap done in
2006                            set_gpio() */
2007                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2008                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2009                         port = (swap_val && swap_override) ^ 1;
2010                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2011                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2012                         break;
2013
2014                 default:
2015                         break;
2016                 }
2017                 bnx2x_fan_failure(bp);
2018         }
2019
2020         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2021                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2022                 bnx2x_acquire_phy_lock(bp);
2023                 bnx2x_handle_module_detect_int(&bp->link_params);
2024                 bnx2x_release_phy_lock(bp);
2025         }
2026
2027         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2028
2029                 val = REG_RD(bp, reg_offset);
2030                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2031                 REG_WR(bp, reg_offset, val);
2032
2033                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2034                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2035                 bnx2x_panic();
2036         }
2037 }
2038
2039 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2040 {
2041         u32 val;
2042
2043         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2044
2045                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2046                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2047                 /* DORQ discard attention */
2048                 if (val & 0x2)
2049                         BNX2X_ERR("FATAL error from DORQ\n");
2050         }
2051
2052         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2053
2054                 int port = BP_PORT(bp);
2055                 int reg_offset;
2056
2057                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2058                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2059
2060                 val = REG_RD(bp, reg_offset);
2061                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2062                 REG_WR(bp, reg_offset, val);
2063
2064                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2065                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2066                 bnx2x_panic();
2067         }
2068 }
2069
2070 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2071 {
2072         u32 val;
2073
2074         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2075
2076                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2077                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2078                 /* CFC error attention */
2079                 if (val & 0x2)
2080                         BNX2X_ERR("FATAL error from CFC\n");
2081         }
2082
2083         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2084
2085                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2086                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2087                 /* RQ_USDMDP_FIFO_OVERFLOW */
2088                 if (val & 0x18000)
2089                         BNX2X_ERR("FATAL error from PXP\n");
2090         }
2091
2092         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2093
2094                 int port = BP_PORT(bp);
2095                 int reg_offset;
2096
2097                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2098                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2099
2100                 val = REG_RD(bp, reg_offset);
2101                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2102                 REG_WR(bp, reg_offset, val);
2103
2104                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2105                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2106                 bnx2x_panic();
2107         }
2108 }
2109
2110 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2111 {
2112         u32 val;
2113
2114         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2115
2116                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2117                         int func = BP_FUNC(bp);
2118
2119                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2120                         bp->mf_config = SHMEM_RD(bp,
2121                                            mf_cfg.func_mf_config[func].config);
2122                         val = SHMEM_RD(bp, func_mb[func].drv_status);
2123                         if (val & DRV_STATUS_DCC_EVENT_MASK)
2124                                 bnx2x_dcc_event(bp,
2125                                             (val & DRV_STATUS_DCC_EVENT_MASK));
2126                         bnx2x__link_status_update(bp);
2127                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2128                                 bnx2x_pmf_update(bp);
2129
2130                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2131
2132                         BNX2X_ERR("MC assert!\n");
2133                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2134                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2135                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2136                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2137                         bnx2x_panic();
2138
2139                 } else if (attn & BNX2X_MCP_ASSERT) {
2140
2141                         BNX2X_ERR("MCP assert!\n");
2142                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2143                         bnx2x_fw_dump(bp);
2144
2145                 } else
2146                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2147         }
2148
2149         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2150                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2151                 if (attn & BNX2X_GRC_TIMEOUT) {
2152                         val = CHIP_IS_E1H(bp) ?
2153                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2154                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2155                 }
2156                 if (attn & BNX2X_GRC_RSV) {
2157                         val = CHIP_IS_E1H(bp) ?
2158                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2159                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2160                 }
2161                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2162         }
2163 }
2164
2165 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
2166 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
2167 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2168 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
2169 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
2170 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2171 /*
2172  * should be run under rtnl lock
2173  */
2174 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2175 {
2176         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2177         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2178         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2179         barrier();
2180         mmiowb();
2181 }
2182
2183 /*
2184  * should be run under rtnl lock
2185  */
2186 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2187 {
2188         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2189         val |= (1 << RESET_DONE_FLAG_SHIFT);
2190         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2191         barrier();
2192         mmiowb();
2193 }
2194
2195 /*
2196  * should be run under rtnl lock
2197  */
2198 bool bnx2x_reset_is_done(struct bnx2x *bp)
2199 {
2200         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2201         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2202         return (val & RESET_DONE_FLAG_MASK) ? false : true;
2203 }
2204
2205 /*
2206  * should be run under rtnl lock
2207  */
2208 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2209 {
2210         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2211
2212         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2213
2214         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2215         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2216         barrier();
2217         mmiowb();
2218 }
2219
2220 /*
2221  * should be run under rtnl lock
2222  */
2223 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2224 {
2225         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2226
2227         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2228
2229         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2230         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2231         barrier();
2232         mmiowb();
2233
2234         return val1;
2235 }
2236
2237 /*
2238  * should be run under rtnl lock
2239  */
2240 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2241 {
2242         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2243 }
2244
2245 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2246 {
2247         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2248         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2249 }
2250
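/*
 * Editor's note: an illustrative sketch, not driver code, of the register
 * layout assumed by the helpers above: bits 15:0 hold the load counter and
 * bit 16 is the reset-in-progress flag. The helper name is hypothetical.
 */
static inline u32 example_pack_gen_reg(u32 load_cnt, int reset_in_progress)
{
        u32 val = load_cnt & LOAD_COUNTER_MASK;

        if (reset_in_progress)
                val |= (1 << RESET_DONE_FLAG_SHIFT);
        return val;
}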
2251 static inline void _print_next_block(int idx, const char *blk)
2252 {
2253         if (idx)
2254                 pr_cont(", ");
2255         pr_cont("%s", blk);
2256 }
2257
2258 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2259 {
2260         int i = 0;
2261         u32 cur_bit = 0;
2262         for (i = 0; sig; i++) {
2263                 cur_bit = ((u32)0x1 << i);
2264                 if (sig & cur_bit) {
2265                         switch (cur_bit) {
2266                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2267                                 _print_next_block(par_num++, "BRB");
2268                                 break;
2269                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2270                                 _print_next_block(par_num++, "PARSER");
2271                                 break;
2272                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2273                                 _print_next_block(par_num++, "TSDM");
2274                                 break;
2275                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2276                                 _print_next_block(par_num++, "SEARCHER");
2277                                 break;
2278                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2279                                 _print_next_block(par_num++, "TSEMI");
2280                                 break;
2281                         }
2282
2283                         /* Clear the bit */
2284                         sig &= ~cur_bit;
2285                 }
2286         }
2287
2288         return par_num;
2289 }
2290
2291 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2292 {
2293         int i = 0;
2294         u32 cur_bit = 0;
2295         for (i = 0; sig; i++) {
2296                 cur_bit = ((u32)0x1 << i);
2297                 if (sig & cur_bit) {
2298                         switch (cur_bit) {
2299                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2300                                 _print_next_block(par_num++, "PBCLIENT");
2301                                 break;
2302                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2303                                 _print_next_block(par_num++, "QM");
2304                                 break;
2305                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2306                                 _print_next_block(par_num++, "XSDM");
2307                                 break;
2308                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2309                                 _print_next_block(par_num++, "XSEMI");
2310                                 break;
2311                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2312                                 _print_next_block(par_num++, "DOORBELLQ");
2313                                 break;
2314                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2315                                 _print_next_block(par_num++, "VAUX PCI CORE");
2316                                 break;
2317                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2318                                 _print_next_block(par_num++, "DEBUG");
2319                                 break;
2320                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2321                                 _print_next_block(par_num++, "USDM");
2322                                 break;
2323                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2324                                 _print_next_block(par_num++, "USEMI");
2325                                 break;
2326                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2327                                 _print_next_block(par_num++, "UPB");
2328                                 break;
2329                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2330                                 _print_next_block(par_num++, "CSDM");
2331                                 break;
2332                         }
2333
2334                         /* Clear the bit */
2335                         sig &= ~cur_bit;
2336                 }
2337         }
2338
2339         return par_num;
2340 }
2341
2342 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2343 {
2344         int i = 0;
2345         u32 cur_bit = 0;
2346         for (i = 0; sig; i++) {
2347                 cur_bit = ((u32)0x1 << i);
2348                 if (sig & cur_bit) {
2349                         switch (cur_bit) {
2350                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2351                                 _print_next_block(par_num++, "CSEMI");
2352                                 break;
2353                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2354                                 _print_next_block(par_num++, "PXP");
2355                                 break;
2356                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2357                                 _print_next_block(par_num++,
2358                                         "PXPPCICLOCKCLIENT");
2359                                 break;
2360                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2361                                 _print_next_block(par_num++, "CFC");
2362                                 break;
2363                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2364                                 _print_next_block(par_num++, "CDU");
2365                                 break;
2366                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2367                                 _print_next_block(par_num++, "IGU");
2368                                 break;
2369                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2370                                 _print_next_block(par_num++, "MISC");
2371                                 break;
2372                         }
2373
2374                         /* Clear the bit */
2375                         sig &= ~cur_bit;
2376                 }
2377         }
2378
2379         return par_num;
2380 }
2381
2382 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2383 {
2384         int i = 0;
2385         u32 cur_bit = 0;
2386         for (i = 0; sig; i++) {
2387                 cur_bit = ((u32)0x1 << i);
2388                 if (sig & cur_bit) {
2389                         switch (cur_bit) {
2390                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2391                                 _print_next_block(par_num++, "MCP ROM");
2392                                 break;
2393                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2394                                 _print_next_block(par_num++, "MCP UMP RX");
2395                                 break;
2396                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2397                                 _print_next_block(par_num++, "MCP UMP TX");
2398                                 break;
2399                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2400                                 _print_next_block(par_num++, "MCP SCPAD");
2401                                 break;
2402                         }
2403
2404                         /* Clear the bit */
2405                         sig &= ~cur_bit;
2406                 }
2407         }
2408
2409         return par_num;
2410 }
2411
2412 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2413                                      u32 sig2, u32 sig3)
2414 {
2415         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2416             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2417                 int par_num = 0;
2418                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
2419                         "[0]:0x%08x [1]:0x%08x "
2420                         "[2]:0x%08x [3]:0x%08x\n",
2421                           sig0 & HW_PRTY_ASSERT_SET_0,
2422                           sig1 & HW_PRTY_ASSERT_SET_1,
2423                           sig2 & HW_PRTY_ASSERT_SET_2,
2424                           sig3 & HW_PRTY_ASSERT_SET_3);
2425                 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2426                        bp->dev->name);
2427                 par_num = bnx2x_print_blocks_with_parity0(
2428                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2429                 par_num = bnx2x_print_blocks_with_parity1(
2430                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2431                 par_num = bnx2x_print_blocks_with_parity2(
2432                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2433                 par_num = bnx2x_print_blocks_with_parity3(
2434                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2435                 printk("\n");
2436                 return true;
2437         } else
2438                 return false;
2439 }
2440
2441 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2442 {
2443         struct attn_route attn;
2444         int port = BP_PORT(bp);
2445
2446         attn.sig[0] = REG_RD(bp,
2447                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2448                              port*4);
2449         attn.sig[1] = REG_RD(bp,
2450                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2451                              port*4);
2452         attn.sig[2] = REG_RD(bp,
2453                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2454                              port*4);
2455         attn.sig[3] = REG_RD(bp,
2456                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2457                              port*4);
2458
2459         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2460                                         attn.sig[3]);
2461 }
2462
2463 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2464 {
2465         struct attn_route attn, *group_mask;
2466         int port = BP_PORT(bp);
2467         int index;
2468         u32 reg_addr;
2469         u32 val;
2470         u32 aeu_mask;
2471
2472         /* need to take HW lock because MCP or other port might also
2473            try to handle this event */
2474         bnx2x_acquire_alr(bp);
2475
2476         if (bnx2x_chk_parity_attn(bp)) {
2477                 bp->recovery_state = BNX2X_RECOVERY_INIT;
2478                 bnx2x_set_reset_in_progress(bp);
2479                 schedule_delayed_work(&bp->reset_task, 0);
2480                 /* Disable HW interrupts */
2481                 bnx2x_int_disable(bp);
2482                 bnx2x_release_alr(bp);
2483                 /* In case of parity errors don't handle attentions so that
2484                  * the other function can also "see" the parity errors.
2485                  */
2486                 return;
2487         }
2488
2489         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2490         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2491         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2492         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2493         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2494            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2495
2496         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2497                 if (deasserted & (1 << index)) {
2498                         group_mask = &bp->attn_group[index];
2499
2500                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2501                            index, group_mask->sig[0], group_mask->sig[1],
2502                            group_mask->sig[2], group_mask->sig[3]);
2503
2504                         bnx2x_attn_int_deasserted3(bp,
2505                                         attn.sig[3] & group_mask->sig[3]);
2506                         bnx2x_attn_int_deasserted1(bp,
2507                                         attn.sig[1] & group_mask->sig[1]);
2508                         bnx2x_attn_int_deasserted2(bp,
2509                                         attn.sig[2] & group_mask->sig[2]);
2510                         bnx2x_attn_int_deasserted0(bp,
2511                                         attn.sig[0] & group_mask->sig[0]);
2512                 }
2513         }
2514
2515         bnx2x_release_alr(bp);
2516
2517         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2518
2519         val = ~deasserted;
2520         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2521            val, reg_addr);
2522         REG_WR(bp, reg_addr, val);
2523
2524         if (~bp->attn_state & deasserted)
2525                 BNX2X_ERR("IGU ERROR\n");
2526
2527         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2528                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2529
2530         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2531         aeu_mask = REG_RD(bp, reg_addr);
2532
2533         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2534            aeu_mask, deasserted);
2535         aeu_mask |= (deasserted & 0x3ff);
2536         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2537
2538         REG_WR(bp, reg_addr, aeu_mask);
2539         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540
2541         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2542         bp->attn_state &= ~deasserted;
2543         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2544 }
2545
2546 static void bnx2x_attn_int(struct bnx2x *bp)
2547 {
2548         /* read local copy of bits */
2549         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2550                                                                 attn_bits);
2551         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2552                                                                 attn_bits_ack);
2553         u32 attn_state = bp->attn_state;
2554
2555         /* look for changed bits */
2556         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2557         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2558
2559         DP(NETIF_MSG_HW,
2560            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2561            attn_bits, attn_ack, asserted, deasserted);
2562
2563         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2564                 BNX2X_ERR("BAD attention state\n");
2565
2566         /* handle bits that were raised */
2567         if (asserted)
2568                 bnx2x_attn_int_asserted(bp, asserted);
2569
2570         if (deasserted)
2571                 bnx2x_attn_int_deasserted(bp, deasserted);
2572 }
2573
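/*
 * Editor's note: an illustrative truth table, not driver code, for the bit
 * math in bnx2x_attn_int() above. For each attention line:
 *
 *   bits ack state | asserted deasserted
 *    1    0    0   |    1         0      (newly raised, not yet acked)
 *    0    1    1   |    0         1      (dropped after being acked)
 *   other combos   |    0         0      (nothing new to handle)
 */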
2574 static void bnx2x_sp_task(struct work_struct *work)
2575 {
2576         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2577         u16 status;
2578
2579         /* Return here if interrupt is disabled */
2580         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2581                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2582                 return;
2583         }
2584
2585         status = bnx2x_update_dsb_idx(bp);
2586 /*      if (status == 0)                                     */
2587 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2588
2589         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2590
2591         /* HW attentions */
2592         if (status & 0x1) {
2593                 bnx2x_attn_int(bp);
2594                 status &= ~0x1;
2595         }
2596
2597         /* CStorm events: STAT_QUERY */
2598         if (status & 0x2) {
2599                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2600                 status &= ~0x2;
2601         }
2602
2603         if (unlikely(status))
2604                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2605                    status);
2606
2607         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2608                      IGU_INT_NOP, 1);
2609         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2610                      IGU_INT_NOP, 1);
2611         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2612                      IGU_INT_NOP, 1);
2613         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2614                      IGU_INT_NOP, 1);
2615         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2616                      IGU_INT_ENABLE, 1);
2617 }
2618
2619 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2620 {
2621         struct net_device *dev = dev_instance;
2622         struct bnx2x *bp = netdev_priv(dev);
2623
2624         /* Return here if interrupt is disabled */
2625         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2626                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2627                 return IRQ_HANDLED;
2628         }
2629
2630         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2631
2632 #ifdef BNX2X_STOP_ON_ERROR
2633         if (unlikely(bp->panic))
2634                 return IRQ_HANDLED;
2635 #endif
2636
2637 #ifdef BCM_CNIC
2638         {
2639                 struct cnic_ops *c_ops;
2640
2641                 rcu_read_lock();
2642                 c_ops = rcu_dereference(bp->cnic_ops);
2643                 if (c_ops)
2644                         c_ops->cnic_handler(bp->cnic_data, NULL);
2645                 rcu_read_unlock();
2646         }
2647 #endif
2648         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2649
2650         return IRQ_HANDLED;
2651 }
2652
2653 /* end of slow path */
2654
2655 static void bnx2x_timer(unsigned long data)
2656 {
2657         struct bnx2x *bp = (struct bnx2x *) data;
2658
2659         if (!netif_running(bp->dev))
2660                 return;
2661
2662         if (atomic_read(&bp->intr_sem) != 0)
2663                 goto timer_restart;
2664
2665         if (poll) {
2666                 struct bnx2x_fastpath *fp = &bp->fp[0];
2667                 int rc;
2668
2669                 bnx2x_tx_int(fp);
2670                 rc = bnx2x_rx_int(fp, 1000);
2671         }
2672
2673         if (!BP_NOMCP(bp)) {
2674                 int func = BP_FUNC(bp);
2675                 u32 drv_pulse;
2676                 u32 mcp_pulse;
2677
2678                 ++bp->fw_drv_pulse_wr_seq;
2679                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2680                 /* TBD - add SYSTEM_TIME */
2681                 drv_pulse = bp->fw_drv_pulse_wr_seq;
2682                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2683
2684                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2685                              MCP_PULSE_SEQ_MASK);
2686                 /* The delta between driver pulse and mcp response
2687                  * should be 1 (before mcp response) or 0 (after mcp response)
2688                  */
2689                 if ((drv_pulse != mcp_pulse) &&
2690                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2691                         /* someone lost a heartbeat... */
2692                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2693                                   drv_pulse, mcp_pulse);
2694                 }
2695         }
2696
2697         if (bp->state == BNX2X_STATE_OPEN)
2698                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2699
2700 timer_restart:
2701         mod_timer(&bp->timer, jiffies + bp->current_interval);
2702 }
2703
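/*
 * Editor's note: an illustrative sketch, not driver code, of the heartbeat
 * check in bnx2x_timer() above: the driver and MCP sequence numbers may
 * differ by at most one (modulo the sequence mask). The helper name is
 * hypothetical.
 */
static inline int example_pulse_ok(u32 drv_pulse, u32 mcp_pulse)
{
        /* e.g. drv 0x005 vs mcp 0x004 is fine; drv 0x005 vs 0x002 is not */
        return (drv_pulse == mcp_pulse) ||
               (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}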
2704 /* end of Statistics */
2705
2706 /* nic init */
2707
2708 /*
2709  * nic init service functions
2710  */
2711
2712 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2713 {
2714         int port = BP_PORT(bp);
2715
2716         /* "CSTORM" */
2717         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2718                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2719                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2720         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2721                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2722                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2723 }
2724
2725 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2726                           dma_addr_t mapping, int sb_id)
2727 {
2728         int port = BP_PORT(bp);
2729         int func = BP_FUNC(bp);
2730         int index;
2731         u64 section;
2732
2733         /* USTORM */
2734         section = ((u64)mapping) + offsetof(struct host_status_block,
2735                                             u_status_block);
2736         sb->u_status_block.status_block_id = sb_id;
2737
2738         REG_WR(bp, BAR_CSTRORM_INTMEM +
2739                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2740         REG_WR(bp, BAR_CSTRORM_INTMEM +
2741                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2742                U64_HI(section));
2743         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2744                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2745
2746         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2747                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2748                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2749
2750         /* CSTORM */
2751         section = ((u64)mapping) + offsetof(struct host_status_block,
2752                                             c_status_block);
2753         sb->c_status_block.status_block_id = sb_id;
2754
2755         REG_WR(bp, BAR_CSTRORM_INTMEM +
2756                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2757         REG_WR(bp, BAR_CSTRORM_INTMEM +
2758                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2759                U64_HI(section));
2760         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2761                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2762
2763         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2764                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2765                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2766
2767         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2768 }
2769
2770 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2771 {
2772         int func = BP_FUNC(bp);
2773
2774         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2775                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2776                         sizeof(struct tstorm_def_status_block)/4);
2777         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2778                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2779                         sizeof(struct cstorm_def_status_block_u)/4);
2780         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2781                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2782                         sizeof(struct cstorm_def_status_block_c)/4);
2783         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2784                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2785                         sizeof(struct xstorm_def_status_block)/4);
2786 }
2787
2788 static void bnx2x_init_def_sb(struct bnx2x *bp,
2789                               struct host_def_status_block *def_sb,
2790                               dma_addr_t mapping, int sb_id)
2791 {
2792         int port = BP_PORT(bp);
2793         int func = BP_FUNC(bp);
2794         int index, val, reg_offset;
2795         u64 section;
2796
2797         /* ATTN */
2798         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2799                                             atten_status_block);
2800         def_sb->atten_status_block.status_block_id = sb_id;
2801
2802         bp->attn_state = 0;
2803
2804         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2805                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2806
2807         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2808                 bp->attn_group[index].sig[0] = REG_RD(bp,
2809                                                      reg_offset + 0x10*index);
2810                 bp->attn_group[index].sig[1] = REG_RD(bp,
2811                                                reg_offset + 0x4 + 0x10*index);
2812                 bp->attn_group[index].sig[2] = REG_RD(bp,
2813                                                reg_offset + 0x8 + 0x10*index);
2814                 bp->attn_group[index].sig[3] = REG_RD(bp,
2815                                                reg_offset + 0xc + 0x10*index);
2816         }
2817
2818         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2819                              HC_REG_ATTN_MSG0_ADDR_L);
2820
2821         REG_WR(bp, reg_offset, U64_LO(section));
2822         REG_WR(bp, reg_offset + 4, U64_HI(section));
2823
2824         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2825
2826         val = REG_RD(bp, reg_offset);
2827         val |= sb_id;
2828         REG_WR(bp, reg_offset, val);
2829
2830         /* USTORM */
2831         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2832                                             u_def_status_block);
2833         def_sb->u_def_status_block.status_block_id = sb_id;
2834
2835         REG_WR(bp, BAR_CSTRORM_INTMEM +
2836                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2837         REG_WR(bp, BAR_CSTRORM_INTMEM +
2838                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2839                U64_HI(section));
2840         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2841                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2842
2843         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2844                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2845                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2846
2847         /* CSTORM */
2848         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2849                                             c_def_status_block);
2850         def_sb->c_def_status_block.status_block_id = sb_id;
2851
2852         REG_WR(bp, BAR_CSTRORM_INTMEM +
2853                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2854         REG_WR(bp, BAR_CSTRORM_INTMEM +
2855                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2856                U64_HI(section));
2857         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2858                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2859
2860         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2861                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2862                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2863
2864         /* TSTORM */
2865         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2866                                             t_def_status_block);
2867         def_sb->t_def_status_block.status_block_id = sb_id;
2868
2869         REG_WR(bp, BAR_TSTRORM_INTMEM +
2870                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2871         REG_WR(bp, BAR_TSTRORM_INTMEM +
2872                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2873                U64_HI(section));
2874         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2875                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2876
2877         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2878                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2879                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2880
2881         /* XSTORM */
2882         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2883                                             x_def_status_block);
2884         def_sb->x_def_status_block.status_block_id = sb_id;
2885
2886         REG_WR(bp, BAR_XSTRORM_INTMEM +
2887                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2888         REG_WR(bp, BAR_XSTRORM_INTMEM +
2889                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2890                U64_HI(section));
2891         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2892                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2893
2894         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2895                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2896                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2897
2898         bp->stats_pending = 0;
2899         bp->set_mac_pending = 0;
2900
2901         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2902 }
2903
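/*
 * Program the HC coalescing timeouts for the Rx and Tx completion
 * indices of every queue; rx_ticks/tx_ticks are scaled by 4*BNX2X_BTR
 * (the base timer resolution), and a resulting timeout of 0 disables
 * coalescing for that index.
 */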
2904 void bnx2x_update_coalesce(struct bnx2x *bp)
2905 {
2906         int port = BP_PORT(bp);
2907         int i;
2908
2909         for_each_queue(bp, i) {
2910                 int sb_id = bp->fp[i].sb_id;
2911
2912                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2913                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2914                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2915                                                       U_SB_ETH_RX_CQ_INDEX),
2916                         bp->rx_ticks/(4 * BNX2X_BTR));
2917                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2918                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2919                                                        U_SB_ETH_RX_CQ_INDEX),
2920                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2921
2922                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2923                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2924                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2925                                                       C_SB_ETH_TX_CQ_INDEX),
2926                         bp->tx_ticks/(4 * BNX2X_BTR));
2927                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2928                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2929                                                        C_SB_ETH_TX_CQ_INDEX),
2930                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2931         }
2932 }
2933
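/*
 * Reset the slowpath queue (SPQ) ring state and tell XSTORM the DMA
 * address of the ring page and the current producer index.
 */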
2934 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2935 {
2936         int func = BP_FUNC(bp);
2937
2938         spin_lock_init(&bp->spq_lock);
2939
2940         bp->spq_left = MAX_SPQ_PENDING;
2941         bp->spq_prod_idx = 0;
2942         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2943         bp->spq_prod_bd = bp->spq;
2944         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2945
2946         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2947                U64_LO(bp->spq_mapping));
2948         REG_WR(bp,
2949                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2950                U64_HI(bp->spq_mapping));
2951
2952         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2953                bp->spq_prod_idx);
2954 }
2955
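/*
 * Build the per-queue ETH connection contexts: the Rx (ustorm) side
 * gets the client ID, BD/SGE page bases, buffer sizes and optional TPA
 * setup; the Tx side gets its status block index and BD page base.
 */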
2956 static void bnx2x_init_context(struct bnx2x *bp)
2957 {
2958         int i;
2959
2960         /* Rx */
2961         for_each_queue(bp, i) {
2962                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2963                 struct bnx2x_fastpath *fp = &bp->fp[i];
2964                 u8 cl_id = fp->cl_id;
2965
2966                 context->ustorm_st_context.common.sb_index_numbers =
2967                                                 BNX2X_RX_SB_INDEX_NUM;
2968                 context->ustorm_st_context.common.clientId = cl_id;
2969                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2970                 context->ustorm_st_context.common.flags =
2971                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2972                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2973                 context->ustorm_st_context.common.statistics_counter_id =
2974                                                 cl_id;
2975                 context->ustorm_st_context.common.mc_alignment_log_size =
2976                                                 BNX2X_RX_ALIGN_SHIFT;
2977                 context->ustorm_st_context.common.bd_buff_size =
2978                                                 bp->rx_buf_size;
2979                 context->ustorm_st_context.common.bd_page_base_hi =
2980                                                 U64_HI(fp->rx_desc_mapping);
2981                 context->ustorm_st_context.common.bd_page_base_lo =
2982                                                 U64_LO(fp->rx_desc_mapping);
2983                 if (!fp->disable_tpa) {
2984                         context->ustorm_st_context.common.flags |=
2985                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2986                         context->ustorm_st_context.common.sge_buff_size =
2987                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2988                                            0xffff);
2989                         context->ustorm_st_context.common.sge_page_base_hi =
2990                                                 U64_HI(fp->rx_sge_mapping);
2991                         context->ustorm_st_context.common.sge_page_base_lo =
2992                                                 U64_LO(fp->rx_sge_mapping);
2993
2994                         context->ustorm_st_context.common.max_sges_for_packet =
2995                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2996                         context->ustorm_st_context.common.max_sges_for_packet =
2997                                 ((context->ustorm_st_context.common.
2998                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
2999                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3000                 }
3001
3002                 context->ustorm_ag_context.cdu_usage =
3003                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3004                                                CDU_REGION_NUMBER_UCM_AG,
3005                                                ETH_CONNECTION_TYPE);
3006
3007                 context->xstorm_ag_context.cdu_reserved =
3008                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3009                                                CDU_REGION_NUMBER_XCM_AG,
3010                                                ETH_CONNECTION_TYPE);
3011         }
3012
3013         /* Tx */
3014         for_each_queue(bp, i) {
3015                 struct bnx2x_fastpath *fp = &bp->fp[i];
3016                 struct eth_context *context =
3017                         bnx2x_sp(bp, context[i].eth);
3018
3019                 context->cstorm_st_context.sb_index_number =
3020                                                 C_SB_ETH_TX_CQ_INDEX;
3021                 context->cstorm_st_context.status_block_id = fp->sb_id;
3022
3023                 context->xstorm_st_context.tx_bd_page_base_hi =
3024                                                 U64_HI(fp->tx_desc_mapping);
3025                 context->xstorm_st_context.tx_bd_page_base_lo =
3026                                                 U64_LO(fp->tx_desc_mapping);
3027                 context->xstorm_st_context.statistics_data = (fp->cl_id |
3028                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3029         }
3030 }
3031
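/*
 * Fill the RSS indirection table by cycling over the client IDs of the
 * active queues, e.g. with 4 queues and a base cl_id of 0 the table
 * repeats the pattern 0, 1, 2, 3.
 */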
3032 static void bnx2x_init_ind_table(struct bnx2x *bp)
3033 {
3034         int func = BP_FUNC(bp);
3035         int i;
3036
3037         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3038                 return;
3039
3040         DP(NETIF_MSG_IFUP,
3041            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
3042         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3043                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3044                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3045                         bp->fp->cl_id + (i % bp->num_queues));
3046 }
3047
3048 void bnx2x_set_client_config(struct bnx2x *bp)
3049 {
3050         struct tstorm_eth_client_config tstorm_client = {0};
3051         int port = BP_PORT(bp);
3052         int i;
3053
3054         tstorm_client.mtu = bp->dev->mtu;
3055         tstorm_client.config_flags =
3056                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3057                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3058 #ifdef BCM_VLAN
3059         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3060                 tstorm_client.config_flags |=
3061                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3062                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3063         }
3064 #endif
3065
3066         for_each_queue(bp, i) {
3067                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3068
3069                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3070                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3071                        ((u32 *)&tstorm_client)[0]);
3072                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3073                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3074                        ((u32 *)&tstorm_client)[1]);
3075         }
3076
3077         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3078            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3079 }
3080
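/*
 * Translate the requested Rx mode into the TSTORM MAC filter config
 * (per-cast-type drop/accept masks) and a NIG LLH mask that selects
 * which packet classes are passed up to the host besides management.
 */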
3081 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3082 {
3083         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3084         int mode = bp->rx_mode;
3085         int mask = bp->rx_mode_cl_mask;
3086         int func = BP_FUNC(bp);
3087         int port = BP_PORT(bp);
3088         int i;
3089         /* All but management unicast packets should pass to the host as well */
3090         u32 llh_mask =
3091                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3092                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3093                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3094                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3095
3096         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
3097
3098         switch (mode) {
3099         case BNX2X_RX_MODE_NONE: /* no Rx */
3100                 tstorm_mac_filter.ucast_drop_all = mask;
3101                 tstorm_mac_filter.mcast_drop_all = mask;
3102                 tstorm_mac_filter.bcast_drop_all = mask;
3103                 break;
3104
3105         case BNX2X_RX_MODE_NORMAL:
3106                 tstorm_mac_filter.bcast_accept_all = mask;
3107                 break;
3108
3109         case BNX2X_RX_MODE_ALLMULTI:
3110                 tstorm_mac_filter.mcast_accept_all = mask;
3111                 tstorm_mac_filter.bcast_accept_all = mask;
3112                 break;
3113
3114         case BNX2X_RX_MODE_PROMISC:
3115                 tstorm_mac_filter.ucast_accept_all = mask;
3116                 tstorm_mac_filter.mcast_accept_all = mask;
3117                 tstorm_mac_filter.bcast_accept_all = mask;
3118                 /* pass management unicast packets as well */
3119                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3120                 break;
3121
3122         default:
3123                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3124                 break;
3125         }
3126
3127         REG_WR(bp,
3128                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3129                llh_mask);
3130
3131         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3132                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3133                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3134                        ((u32 *)&tstorm_mac_filter)[i]);
3135
3136 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3137                    ((u32 *)&tstorm_mac_filter)[i]); */
3138         }
3139
3140         if (mode != BNX2X_RX_MODE_NONE)
3141                 bnx2x_set_client_config(bp);
3142 }
3143
3144 static void bnx2x_init_internal_common(struct bnx2x *bp)
3145 {
3146         int i;
3147
3148         /* Zero this manually as its initialization is
3149            currently missing in the initTool */
3150         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3151                 REG_WR(bp, BAR_USTRORM_INTMEM +
3152                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
3153 }
3154
3155 static void bnx2x_init_internal_port(struct bnx2x *bp)
3156 {
3157         int port = BP_PORT(bp);
3158
3159         REG_WR(bp,
3160                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3161         REG_WR(bp,
3162                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3163         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3164         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3165 }
3166
3167 static void bnx2x_init_internal_func(struct bnx2x *bp)
3168 {
3169         struct tstorm_eth_function_common_config tstorm_config = {0};
3170         struct stats_indication_flags stats_flags = {0};
3171         int port = BP_PORT(bp);
3172         int func = BP_FUNC(bp);
3173         int i, j;
3174         u32 offset;
3175         u16 max_agg_size;
3176
3177         tstorm_config.config_flags = RSS_FLAGS(bp);
3178
3179         if (is_multi(bp))
3180                 tstorm_config.rss_result_mask = MULTI_MASK;
3181
3182         /* Enable TPA if needed */
3183         if (bp->flags & TPA_ENABLE_FLAG)
3184                 tstorm_config.config_flags |=
3185                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3186
3187         if (IS_E1HMF(bp))
3188                 tstorm_config.config_flags |=
3189                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3190
3191         tstorm_config.leading_client_id = BP_L_ID(bp);
3192
3193         REG_WR(bp, BAR_TSTRORM_INTMEM +
3194                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3195                (*(u32 *)&tstorm_config));
3196
3197         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3198         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3199         bnx2x_set_storm_rx_mode(bp);
3200
3201         for_each_queue(bp, i) {
3202                 u8 cl_id = bp->fp[i].cl_id;
3203
3204                 /* reset xstorm per client statistics */
3205                 offset = BAR_XSTRORM_INTMEM +
3206                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3207                 for (j = 0;
3208                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3209                         REG_WR(bp, offset + j*4, 0);
3210
3211                 /* reset tstorm per client statistics */
3212                 offset = BAR_TSTRORM_INTMEM +
3213                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3214                 for (j = 0;
3215                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3216                         REG_WR(bp, offset + j*4, 0);
3217
3218                 /* reset ustorm per client statistics */
3219                 offset = BAR_USTRORM_INTMEM +
3220                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3221                 for (j = 0;
3222                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3223                         REG_WR(bp, offset + j*4, 0);
3224         }
3225
3226         /* Init statistics related context */
3227         stats_flags.collect_eth = 1;
3228
3229         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3230                ((u32 *)&stats_flags)[0]);
3231         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3232                ((u32 *)&stats_flags)[1]);
3233
3234         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3235                ((u32 *)&stats_flags)[0]);
3236         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3237                ((u32 *)&stats_flags)[1]);
3238
3239         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3240                ((u32 *)&stats_flags)[0]);
3241         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3242                ((u32 *)&stats_flags)[1]);
3243
3244         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3245                ((u32 *)&stats_flags)[0]);
3246         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3247                ((u32 *)&stats_flags)[1]);
3248
3249         REG_WR(bp, BAR_XSTRORM_INTMEM +
3250                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3251                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3252         REG_WR(bp, BAR_XSTRORM_INTMEM +
3253                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3254                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3255
3256         REG_WR(bp, BAR_TSTRORM_INTMEM +
3257                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3258                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3259         REG_WR(bp, BAR_TSTRORM_INTMEM +
3260                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3261                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3262
3263         REG_WR(bp, BAR_USTRORM_INTMEM +
3264                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3265                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3266         REG_WR(bp, BAR_USTRORM_INTMEM +
3267                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3268                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3269
3270         if (CHIP_IS_E1H(bp)) {
3271                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3272                         IS_E1HMF(bp));
3273                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3274                         IS_E1HMF(bp));
3275                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3276                         IS_E1HMF(bp));
3277                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3278                         IS_E1HMF(bp));
3279
3280                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3281                          bp->e1hov);
3282         }
3283
3284         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
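        /* e.g. with 4 KB pages, one page per SGE and MAX_SKB_FRAGS >= 8
           this is 8 * 4096 = 32768, comfortably below the 0xffff cap */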
3285         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3286                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3287         for_each_queue(bp, i) {
3288                 struct bnx2x_fastpath *fp = &bp->fp[i];
3289
3290                 REG_WR(bp, BAR_USTRORM_INTMEM +
3291                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3292                        U64_LO(fp->rx_comp_mapping));
3293                 REG_WR(bp, BAR_USTRORM_INTMEM +
3294                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3295                        U64_HI(fp->rx_comp_mapping));
3296
3297                 /* Next page */
3298                 REG_WR(bp, BAR_USTRORM_INTMEM +
3299                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3300                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3301                 REG_WR(bp, BAR_USTRORM_INTMEM +
3302                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3303                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3304
3305                 REG_WR16(bp, BAR_USTRORM_INTMEM +
3306                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3307                          max_agg_size);
3308         }
3309
3310         /* dropless flow control */
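        /* Low/high watermarks (in ring entries) for the FW pause logic;
           the SGE thresholds stay 0 unless TPA is enabled on the queue
           (set in the per-queue loop below) */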
3311         if (CHIP_IS_E1H(bp)) {
3312                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3313
3314                 rx_pause.bd_thr_low = 250;
3315                 rx_pause.cqe_thr_low = 250;
3316                 rx_pause.cos = 1;
3317                 rx_pause.sge_thr_low = 0;
3318                 rx_pause.bd_thr_high = 350;
3319                 rx_pause.cqe_thr_high = 350;
3320                 rx_pause.sge_thr_high = 0;
3321
3322                 for_each_queue(bp, i) {
3323                         struct bnx2x_fastpath *fp = &bp->fp[i];
3324
3325                         if (!fp->disable_tpa) {
3326                                 rx_pause.sge_thr_low = 150;
3327                                 rx_pause.sge_thr_high = 250;
3328                         }
3329
3331                         offset = BAR_USTRORM_INTMEM +
3332                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3333                                                                    fp->cl_id);
3334                         for (j = 0;
3335                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3336                              j++)
3337                                 REG_WR(bp, offset + j*4,
3338                                        ((u32 *)&rx_pause)[j]);
3339                 }
3340         }
3341
3342         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3343
3344         /* Init rate shaping and fairness contexts */
3345         if (IS_E1HMF(bp)) {
3346                 int vn;
3347
3348                 /* During init there is no active link;
3349                    until link is up, set link rate to 10Gbps */
3350                 bp->link_vars.line_speed = SPEED_10000;
3351                 bnx2x_init_port_minmax(bp);
3352
3353                 if (!BP_NOMCP(bp))
3354                         bp->mf_config =
3355                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3356                 bnx2x_calc_vn_weight_sum(bp);
3357
3358                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3359                         bnx2x_init_vn_minmax(bp, 2*vn + port);
3360
3361                 /* Enable rate shaping and fairness */
3362                 bp->cmng.flags.cmng_enables |=
3363                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3364
3365         } else {
3366                 /* rate shaping and fairness are disabled */
3367                 DP(NETIF_MSG_IFUP,
3368                    "single function mode  minmax will be disabled\n");
3369         }
3370
3372         /* Store cmng structures to internal memory */
3373         if (bp->port.pmf)
3374                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3375                         REG_WR(bp, BAR_XSTRORM_INTMEM +
3376                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3377                                ((u32 *)(&bp->cmng))[i]);
3378 }
3379
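/*
 * The MCP load codes are cumulative: a COMMON load also runs the PORT
 * and FUNCTION stages, and a PORT load also runs the FUNCTION stage,
 * hence the deliberate fall-through in the switch below.
 */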
3380 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3381 {
3382         switch (load_code) {
3383         case FW_MSG_CODE_DRV_LOAD_COMMON:
3384                 bnx2x_init_internal_common(bp);
3385                 /* no break */
3386
3387         case FW_MSG_CODE_DRV_LOAD_PORT:
3388                 bnx2x_init_internal_port(bp);
3389                 /* no break */
3390
3391         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3392                 bnx2x_init_internal_func(bp);
3393                 break;
3394
3395         default:
3396                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3397                 break;
3398         }
3399 }
3400
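/*
 * Top-level NIC init: set up the per-queue and default status blocks,
 * the Rx/Tx/slowpath rings and connection contexts, the internal
 * memories for the given load code, and only then enable interrupts.
 */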
3401 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3402 {
3403         int i;
3404
3405         for_each_queue(bp, i) {
3406                 struct bnx2x_fastpath *fp = &bp->fp[i];
3407
3408                 fp->bp = bp;
3409                 fp->state = BNX2X_FP_STATE_CLOSED;
3410                 fp->index = i;
3411                 fp->cl_id = BP_L_ID(bp) + i;
3412 #ifdef BCM_CNIC
3413                 fp->sb_id = fp->cl_id + 1;
3414 #else
3415                 fp->sb_id = fp->cl_id;
3416 #endif
3417                 DP(NETIF_MSG_IFUP,
3418                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
3419                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3420                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3421                               fp->sb_id);
3422                 bnx2x_update_fpsb_idx(fp);
3423         }
3424
3425         /* ensure status block indices were read */
3426         rmb();
3427
3429         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3430                           DEF_SB_ID);
3431         bnx2x_update_dsb_idx(bp);
3432         bnx2x_update_coalesce(bp);
3433         bnx2x_init_rx_rings(bp);
3434         bnx2x_init_tx_ring(bp);
3435         bnx2x_init_sp_ring(bp);
3436         bnx2x_init_context(bp);
3437         bnx2x_init_internal(bp, load_code);
3438         bnx2x_init_ind_table(bp);
3439         bnx2x_stats_init(bp);
3440
3441         /* At this point, we are ready for interrupts */
3442         atomic_set(&bp->intr_sem, 0);
3443
3444         /* flush all before enabling interrupts */
3445         mb();
3446         mmiowb();
3447
3448         bnx2x_int_enable(bp);
3449
3450         /* Check for SPIO5 */
3451         bnx2x_attn_int_deasserted0(bp,
3452                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3453                                    AEU_INPUTS_ATTN_BITS_SPIO5);
3454 }
3455
3456 /* end of nic init */
3457
3458 /*
3459  * gzip service functions
3460  */
3461
3462 static int bnx2x_gunzip_init(struct bnx2x *bp)
3463 {
3464         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3465                                             &bp->gunzip_mapping, GFP_KERNEL);
3466         if (bp->gunzip_buf  == NULL)
3467                 goto gunzip_nomem1;
3468
3469         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3470         if (bp->strm  == NULL)
3471                 goto gunzip_nomem2;
3472
3473         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3474                                       GFP_KERNEL);
3475         if (bp->strm->workspace == NULL)
3476                 goto gunzip_nomem3;
3477
3478         return 0;
3479
3480 gunzip_nomem3:
3481         kfree(bp->strm);
3482         bp->strm = NULL;
3483
3484 gunzip_nomem2:
3485         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3486                           bp->gunzip_mapping);
3487         bp->gunzip_buf = NULL;
3488
3489 gunzip_nomem1:
3490         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3491                " decompression\n");
3492         return -ENOMEM;
3493 }
3494
3495 static void bnx2x_gunzip_end(struct bnx2x *bp)
3496 {
3497         kfree(bp->strm->workspace);
3498
3499         kfree(bp->strm);
3500         bp->strm = NULL;
3501
3502         if (bp->gunzip_buf) {
3503                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3504                                   bp->gunzip_mapping);
3505                 bp->gunzip_buf = NULL;
3506         }
3507 }
3508
3509 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3510 {
3511         int n, rc;
3512
3513         /* check gzip header */
3514         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3515                 BNX2X_ERR("Bad gzip header\n");
3516                 return -EINVAL;
3517         }
3518
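        /* Per RFC 1952 a member starts with a fixed 10-byte header
           (magic, method, flags, mtime, XFL, OS); if FLG.FNAME is set a
           NUL-terminated file name follows and must be skipped */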
3519         n = 10;
3520
3521 #define FNAME                           0x8
3522
3523         if (zbuf[3] & FNAME)
3524                 while ((zbuf[n++] != 0) && (n < len));
3525
3526         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3527         bp->strm->avail_in = len - n;
3528         bp->strm->next_out = bp->gunzip_buf;
3529         bp->strm->avail_out = FW_BUF_SIZE;
3530
3531         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3532         if (rc != Z_OK)
3533                 return rc;
3534
3535         rc = zlib_inflate(bp->strm, Z_FINISH);
3536         if ((rc != Z_OK) && (rc != Z_STREAM_END))
3537                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3538                            bp->strm->msg);
3539
3540         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3541         if (bp->gunzip_outlen & 0x3)
3542                 netdev_err(bp->dev, "Firmware decompression error:"
3543                                     " gunzip_outlen (%d) not aligned\n",
3544                                 bp->gunzip_outlen);
3545         bp->gunzip_outlen >>= 2;
3546
3547         zlib_inflateEnd(bp->strm);
3548
3549         if (rc == Z_STREAM_END)
3550                 return 0;
3551
3552         return rc;
3553 }
3554
3555 /* nic load/unload */
3556
3557 /*
3558  * General service functions
3559  */
3560
3561 /* send a NIG loopback debug packet */
3562 static void bnx2x_lb_pckt(struct bnx2x *bp)
3563 {
3564         u32 wb_write[3];
3565
3566         /* Ethernet source and destination addresses */
3567         wb_write[0] = 0x55555555;
3568         wb_write[1] = 0x55555555;
3569         wb_write[2] = 0x20;             /* SOP */
3570         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3571
3572         /* NON-IP protocol */
3573         wb_write[0] = 0x09000000;
3574         wb_write[1] = 0x55555555;
3575         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
3576         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3577 }
3578
3579 /* Some of the internal memories
3580  * are not directly readable from the driver;
3581  * to test them we send debug packets.
3582  */
3583 static int bnx2x_int_mem_test(struct bnx2x *bp)
3584 {
3585         int factor;
3586         int count, i;
3587         u32 val = 0;
3588
3589         if (CHIP_REV_IS_FPGA(bp))
3590                 factor = 120;
3591         else if (CHIP_REV_IS_EMUL(bp))
3592                 factor = 200;
3593         else
3594                 factor = 1;
3595
3596         DP(NETIF_MSG_HW, "start part1\n");
3597
3598         /* Disable inputs of parser neighbor blocks */
3599         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3600         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3601         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3602         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3603
3604         /*  Write 0 to parser credits for CFC search request */
3605         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3606
3607         /* send Ethernet packet */
3608         bnx2x_lb_pckt(bp);
3609
3610         /* TODO: do we need to reset the NIG statistics? */
3611         /* Wait until NIG register shows 1 packet of size 0x10 */
3612         count = 1000 * factor;
3613         while (count) {
3614
3615                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3616                 val = *bnx2x_sp(bp, wb_data[0]);
3617                 if (val == 0x10)
3618                         break;
3619
3620                 msleep(10);
3621                 count--;
3622         }
3623         if (val != 0x10) {
3624                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3625                 return -1;
3626         }
3627
3628         /* Wait until PRS register shows 1 packet */
3629         count = 1000 * factor;
3630         while (count) {
3631                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3632                 if (val == 1)
3633                         break;
3634
3635                 msleep(10);
3636                 count--;
3637         }
3638         if (val != 0x1) {
3639                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3640                 return -2;
3641         }
3642
3643         /* Reset and init BRB, PRS */
3644         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3645         msleep(50);
3646         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3647         msleep(50);
3648         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3649         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3650
3651         DP(NETIF_MSG_HW, "part2\n");
3652
3653         /* Disable inputs of parser neighbor blocks */
3654         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3655         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3656         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3657         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3658
3659         /* Write 0 to parser credits for CFC search request */
3660         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3661
3662         /* send 10 Ethernet packets */
3663         for (i = 0; i < 10; i++)
3664                 bnx2x_lb_pckt(bp);
3665
3666         /* Wait until NIG register shows 10 + 1
3667            packets, 11*0x10 = 0xb0 bytes in total */
3668         count = 1000 * factor;
3669         while (count) {
3670
3671                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3672                 val = *bnx2x_sp(bp, wb_data[0]);
3673                 if (val == 0xb0)
3674                         break;
3675
3676                 msleep(10);
3677                 count--;
3678         }
3679         if (val != 0xb0) {
3680                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3681                 return -3;
3682         }
3683
3684         /* Wait until PRS register shows 2 packets */
3685         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3686         if (val != 2)
3687                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3688
3689         /* Write 1 to parser credits for CFC search request */
3690         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3691
3692         /* Wait until PRS register shows 3 packets */
3693         msleep(10 * factor);
3694         /* and verify that the PRS packet counter reached 3 */
3695         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3696         if (val != 3)
3697                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3698
3699         /* clear NIG EOP FIFO */
3700         for (i = 0; i < 11; i++)
3701                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3702         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3703         if (val != 1) {
3704                 BNX2X_ERR("clear of NIG failed\n");
3705                 return -4;
3706         }
3707
3708         /* Reset and init BRB, PRS, NIG */
3709         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3710         msleep(50);
3711         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3712         msleep(50);
3713         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3714         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3715 #ifndef BCM_CNIC
3716         /* set NIC mode */
3717         REG_WR(bp, PRS_REG_NIC_MODE, 1);
3718 #endif
3719
3720         /* Enable inputs of parser neighbor blocks */
3721         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3722         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3723         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3724         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3725
3726         DP(NETIF_MSG_HW, "done\n");
3727
3728         return 0; /* OK */
3729 }
3730
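/*
 * Writing 0 to a block's INT_MASK unmasks all of its attention sources;
 * the non-zero writes below deliberately keep a few noisy bits masked
 * (see the inline comments).
 */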
3731 static void enable_blocks_attention(struct bnx2x *bp)
3732 {
3733         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3734         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3735         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3736         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3737         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3738         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3739         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3740         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3741         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3742 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3743 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3744         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3745         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3746         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3747 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3748 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3749         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3750         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3751         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3752         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3753 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3754 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3755         if (CHIP_REV_IS_FPGA(bp))
3756                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3757         else
3758                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3759         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3760         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3761         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3762 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3763 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3764         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3765         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3766 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3767         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
3768 }
3769
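/*
 * Per-block parity interrupt masks: a set bit keeps that parity source
 * masked, so 0x0 enables every source of a block while 0xffffffff
 * leaves its parity reporting fully masked.
 */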
3770 static const struct {
3771         u32 addr;
3772         u32 mask;
3773 } bnx2x_parity_mask[] = {
3774         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3775         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3776         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3777         {HC_REG_HC_PRTY_MASK, 0xffffffff},
3778         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3779         {QM_REG_QM_PRTY_MASK, 0x0},
3780         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3781         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3782         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3783         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3784         {CDU_REG_CDU_PRTY_MASK, 0x0},
3785         {CFC_REG_CFC_PRTY_MASK, 0x0},
3786         {DBG_REG_DBG_PRTY_MASK, 0x0},
3787         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3788         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3789         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3790         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3791         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3792         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3793         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3794         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3795         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3796         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3797         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3798         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3799         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3800         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3801         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3802 };
3803
3804 static void enable_blocks_parity(struct bnx2x *bp)
3805 {
3806         int i;
3807
3808         for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
3809                 REG_WR(bp, bnx2x_parity_mask[i].addr,
3810                        bnx2x_parity_mask[i].mask);
3812 }
3813
3815 static void bnx2x_reset_common(struct bnx2x *bp)
3816 {
3817         /* reset_common */
3818         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3819                0xd3ffff7f);
3820         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3821 }
3822
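/*
 * Derive the PXP arbiter read/write orders from the PCIe Device Control
 * register: bits [7:5] encode the max payload size and bits [14:12] the
 * max read request size, both as 128 << value.
 */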
3823 static void bnx2x_init_pxp(struct bnx2x *bp)
3824 {
3825         u16 devctl;
3826         int r_order, w_order;
3827
3828         pci_read_config_word(bp->pdev,
3829                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3830         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3831         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3832         if (bp->mrrs == -1)
3833                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3834         else {
3835                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3836                 r_order = bp->mrrs;
3837         }
3838
3839         bnx2x_init_pxp_arb(bp, r_order, w_order);
3840 }
3841
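/*
 * Fan failure is reported on SPIO5; configure it as an active-low input
 * routed to the IGU, but only on boards whose config or fitted PHY type
 * requires fan monitoring.
 */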
3842 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3843 {
3844         int is_required;
3845         u32 val;
3846         int port;
3847
3848         if (BP_NOMCP(bp))
3849                 return;
3850
3851         is_required = 0;
3852         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3853               SHARED_HW_CFG_FAN_FAILURE_MASK;
3854
3855         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3856                 is_required = 1;
3857
3858         /*
3859          * The fan failure mechanism is usually related to the PHY type since
3860          * the power consumption of the board is affected by the PHY. Currently,
3861          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3862          */
3863         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3864                 for (port = PORT_0; port < PORT_MAX; port++) {
3865                         u32 phy_type =
3866                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
3867                                          external_phy_config) &
3868                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869                         is_required |=
3870                                 ((phy_type ==
3871                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3872                                  (phy_type ==
3873                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3874                                  (phy_type ==
3875                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3876                 }
3877
3878         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3879
3880         if (is_required == 0)
3881                 return;
3882
3883         /* Fan failure is indicated by SPIO 5 */
3884         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3885                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
3886
3887         /* set to active low mode */
3888         val = REG_RD(bp, MISC_REG_SPIO_INT);
3889         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3890                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3891         REG_WR(bp, MISC_REG_SPIO_INT, val);
3892
3893         /* enable interrupt to signal the IGU */
3894         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3895         val |= (1 << MISC_REGISTERS_SPIO_5);
3896         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3897 }
3898
3899 static int bnx2x_init_common(struct bnx2x *bp)
3900 {
3901         u32 val, i;
3902 #ifdef BCM_CNIC
3903         u32 wb_write[2];
3904 #endif
3905
3906         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
3907
3908         bnx2x_reset_common(bp);
3909         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3910         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3911
3912         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3913         if (CHIP_IS_E1H(bp))
3914                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3915
3916         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3917         msleep(30);
3918         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3919
3920         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3921         if (CHIP_IS_E1(bp)) {
3922                 /* enable HW interrupt from PXP on USDM overflow
3923                    bit 16 on INT_MASK_0 */
3924                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3925         }
3926
3927         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3928         bnx2x_init_pxp(bp);
3929
3930 #ifdef __BIG_ENDIAN
3931         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3932         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3933         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3934         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3935         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3936         /* make sure this value is 0 */
3937         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3938
3939 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3940         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3941         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3942         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3943         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3944 #endif
3945
3946         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3947 #ifdef BCM_CNIC
3948         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3949         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3950         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3951 #endif
3952
3953         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3954                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3955
3956         /* let the HW do its magic ... */
3957         msleep(100);
3958         /* finish PXP init */
3959         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3960         if (val != 1) {
3961                 BNX2X_ERR("PXP2 CFG failed\n");
3962                 return -EBUSY;
3963         }
3964         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3965         if (val != 1) {
3966                 BNX2X_ERR("PXP2 RD_INIT failed\n");
3967                 return -EBUSY;
3968         }
3969
3970         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3971         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3972
3973         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3974
3975         /* clean the DMAE memory */
3976         bp->dmae_ready = 1;
3977         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3978
3979         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3980         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3981         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3982         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3983
3984         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3985         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3986         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3987         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3988
3989         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3990
3991 #ifdef BCM_CNIC
3992         wb_write[0] = 0;
3993         wb_write[1] = 0;
3994         for (i = 0; i < 64; i++) {
3995                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3996                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3997
3998                 if (CHIP_IS_E1H(bp)) {
3999                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4000                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4001                                           wb_write, 2);
4002                 }
4003         }
4004 #endif
4005         /* soft reset pulse */
4006         REG_WR(bp, QM_REG_SOFT_RESET, 1);
4007         REG_WR(bp, QM_REG_SOFT_RESET, 0);
4008
4009 #ifdef BCM_CNIC
4010         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4011 #endif
4012
4013         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4014         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4015         if (!CHIP_REV_IS_SLOW(bp)) {
4016                 /* enable hw interrupt from doorbell Q */
4017                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4018         }
4019
4020         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4021         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4022         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4023 #ifndef BCM_CNIC
4024         /* set NIC mode */
4025         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4026 #endif
4027         if (CHIP_IS_E1H(bp))
4028                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4029
4030         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4031         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4032         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4033         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4034
4035         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4036         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4039
4040         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4041         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4042         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4043         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4044
4045         /* sync semi rtc */
4046         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4047                0x80000000);
4048         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4049                0x80000000);
4050
4051         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4052         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4053         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4054
4055         REG_WR(bp, SRC_REG_SOFT_RST, 1);
4056         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4057                 REG_WR(bp, i, random32());
4058         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4059 #ifdef BCM_CNIC
4060         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4061         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4062         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4063         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4064         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4065         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4066         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4067         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4068         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4069         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4070 #endif
4071         REG_WR(bp, SRC_REG_SOFT_RST, 0);
4072
4073         if (sizeof(union cdu_context) != 1024)
4074                 /* we currently assume that a context is 1024 bytes */
4075                 dev_alert(&bp->pdev->dev, "please adjust the size "
4076                                           "of cdu_context(%ld)\n",
4077                          (long)sizeof(union cdu_context));
4078
4079         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4080         val = (4 << 24) + (0 << 12) + 1024;
4081         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4082
4083         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4084         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4085         /* enable context validation interrupt from CFC */
4086         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4087
4088         /* set the thresholds to prevent CFC/CDU race */
4089         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4090
4091         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4092         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4093
4094         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4095         /* Reset PCIE errors for debug */
4096         REG_WR(bp, 0x2814, 0xffffffff);
4097         REG_WR(bp, 0x3820, 0xffffffff);
4098
4099         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4100         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4101         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4102         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4103
4104         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4105         if (CHIP_IS_E1H(bp)) {
4106                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4107                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4108         }
4109
4110         if (CHIP_REV_IS_SLOW(bp))
4111                 msleep(200);
4112
4113         /* finish CFC init */
4114         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4115         if (val != 1) {
4116                 BNX2X_ERR("CFC LL_INIT failed\n");
4117                 return -EBUSY;
4118         }
4119         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4120         if (val != 1) {
4121                 BNX2X_ERR("CFC AC_INIT failed\n");
4122                 return -EBUSY;
4123         }
4124         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4125         if (val != 1) {
4126                 BNX2X_ERR("CFC CAM_INIT failed\n");
4127                 return -EBUSY;
4128         }
4129         REG_WR(bp, CFC_REG_DEBUG0, 0);
4130
4131         /* read the NIG statistic
4132            to see if this is the first load since power-up */
4133         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4134         val = *bnx2x_sp(bp, wb_data[0]);
4135
4136         /* do internal memory self test */
4137         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4138                 BNX2X_ERR("internal mem self test failed\n");
4139                 return -EBUSY;
4140         }
4141
4142         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4143         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4144         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4145         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4146         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4147                 bp->port.need_hw_lock = 1;
4148                 break;
4149
4150         default:
4151                 break;
4152         }
4153
4154         bnx2x_setup_fan_failure_detection(bp);
4155
4156         /* clear PXP2 attentions */
4157         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4158
4159         enable_blocks_attention(bp);
4160         if (CHIP_PARITY_SUPPORTED(bp))
4161                 enable_blocks_parity(bp);
4162
4163         if (!BP_NOMCP(bp)) {
4164                 bnx2x_acquire_phy_lock(bp);
4165                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4166                 bnx2x_release_phy_lock(bp);
4167         } else
4168                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4169
4170         return 0;
4171 }
4172
4173 static int bnx2x_init_port(struct bnx2x *bp)
4174 {
4175         int port = BP_PORT(bp);
4176         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4177         u32 low, high;
4178         u32 val;
4179
4180         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
4181
4182         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4183
4184         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4185         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4186
4187         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4188         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4189         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4190         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4191
4192 #ifdef BCM_CNIC
4193         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4194
4195         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4196         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4197         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4198 #endif
4199
4200         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4201
4202         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4203         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4204                 /* no pause for emulation and FPGA */
4205                 low = 0;
4206                 high = 513;
4207         } else {
4208                 if (IS_E1HMF(bp))
4209                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4210                 else if (bp->dev->mtu > 4096) {
4211                         if (bp->flags & ONE_PORT_FLAG)
4212                                 low = 160;
4213                         else {
4214                                 val = bp->dev->mtu;
4215                                 /* (24*1024 + val*4)/256 */
4216                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4217                         }
4218                 } else
4219                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4220                 high = low + 56;        /* 14*1024/256 */
4221         }
4222         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4223         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
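        /* Editor's note - worked example (illustrative): with mtu = 9000 on
         * a two-port device in single-function mode, the else-branch above
         * yields low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293,
         * in units of 256-byte BRB blocks ((24KB + 4*mtu)/256 and +14KB/256
         * respectively); the pause semantics of the two thresholds are
         * defined by the BRB block itself.
         */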
4224
4225
4226         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4227
4228         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4229         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4230         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4231         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4232
4233         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4234         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4235         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4236         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4237
4238         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4239         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4240
4241         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4242
4243         /* configure PBF to work without PAUSE mtu 9000 */
4244         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4245
4246         /* update threshold */
4247         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4248         /* update init credit */
4249         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
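        /* Editor's note: the PBF counts in 16-byte units, so 9040/16 = 565
         * covers a 9000-byte frame plus overhead, and the init credit above
         * works out to 565 + 553 - 22 = 1096; the 553 and -22 adjustments
         * are hardware-specific constants kept as-is from the original.
         */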
4250
4251         /* probe changes */
4252         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4253         msleep(5);
4254         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4255
4256 #ifdef BCM_CNIC
4257         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4258 #endif
4259         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4260         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4261
4262         if (CHIP_IS_E1(bp)) {
4263                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4264                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4265         }
4266         bnx2x_init_block(bp, HC_BLOCK, init_stage);
4267
4268         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4269         /* init aeu_mask_attn_func_0/1:
4270          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4271          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4272          *             bits 4-7 are used for "per vn group attention" */
4273         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4274                (IS_E1HMF(bp) ? 0xF7 : 0x7));
4275
4276         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4277         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4278         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4279         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4280         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4281
4282         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4283
4284         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4285
4286         if (CHIP_IS_E1H(bp)) {
4287                 /* 0x2 disable e1hov, 0x1 enable */
4288                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4289                        (IS_E1HMF(bp) ? 0x1 : 0x2));
4290
4291                 {
4292                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4293                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4294                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4295                 }
4296         }
4297
4298         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4299         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4300
4301         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4302         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4303                 {
4304                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4305
4306                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4307                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4308
4309                 /* The GPIO should be swapped if the swap register is
4310                    set and active */
4311                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4312                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4313
4314                 /* Select function upon port-swap configuration */
4315                 if (port == 0) {
4316                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4317                         aeu_gpio_mask = (swap_val && swap_override) ?
4318                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4319                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4320                 } else {
4321                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4322                         aeu_gpio_mask = (swap_val && swap_override) ?
4323                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4324                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4325                 }
4326                 val = REG_RD(bp, offset);
4327                 /* add GPIO3 to group */
4328                 val |= aeu_gpio_mask;
4329                 REG_WR(bp, offset, val);
4330                 }
4331                 break;
4332
4333         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4334         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4335                 /* add SPIO 5 to group 0 */
4336                 {
4337                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4338                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4339                 val = REG_RD(bp, reg_addr);
4340                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4341                 REG_WR(bp, reg_addr, val);
4342                 }
4343                 break;
4344
4345         default:
4346                 break;
4347         }
4348
4349         bnx2x__link_reset(bp);
4350
4351         return 0;
4352 }
4353
4354 #define ILT_PER_FUNC            (768/2)
4355 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
4356 /* the physical address is shifted right by 12 bits and a
4357    valid bit (1) is added at bit 52 (the 53rd bit);
4358    then, since this is a wide register(TM),
4359    we split it into two 32-bit writes
4360  */
4361 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4362 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
4363 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
4364 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
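/* Editor's note - worked example (illustrative): for a DMA address
 * x = 0x123456789000, x >> 12 = 0x123456789, so ONCHIP_ADDR1(x) takes the
 * low 32 bits (0x23456789) and ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44) =
 * 0x00100001 (the valid bit plus the remaining high bits); the pair is
 * then written as one wide register via bnx2x_wb_wr().
 */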
4365
4366 #ifdef BCM_CNIC
4367 #define CNIC_ILT_LINES          127
4368 #define CNIC_CTX_PER_ILT        16
4369 #else
4370 #define CNIC_ILT_LINES          0
4371 #endif
4372
4373 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4374 {
4375         int reg;
4376
4377         if (CHIP_IS_E1H(bp))
4378                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4379         else /* E1 */
4380                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4381
4382         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4383 }
4384
4385 static int bnx2x_init_func(struct bnx2x *bp)
4386 {
4387         int port = BP_PORT(bp);
4388         int func = BP_FUNC(bp);
4389         u32 addr, val;
4390         int i;
4391
4392         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4393
4394         /* set MSI reconfigure capability */
4395         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4396         val = REG_RD(bp, addr);
4397         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4398         REG_WR(bp, addr, val);
4399
4400         i = FUNC_ILT_BASE(func);
4401
4402         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4403         if (CHIP_IS_E1H(bp)) {
4404                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4405                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4406         } else /* E1 */
4407                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4408                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4409
4410 #ifdef BCM_CNIC
4411         i += 1 + CNIC_ILT_LINES;
4412         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4413         if (CHIP_IS_E1(bp))
4414                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4415         else {
4416                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4417                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4418         }
4419
4420         i++;
4421         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4422         if (CHIP_IS_E1(bp))
4423                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4424         else {
4425                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4426                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4427         }
4428
4429         i++;
4430         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4431         if (CHIP_IS_E1(bp))
4432                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4433         else {
4434                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4435                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4436         }
4437
4438         /* tell the searcher where the T2 table is */
4439         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4440
4441         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4442                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4443
4444         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4445                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4446                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4447
4448         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4449 #endif
4450
4451         if (CHIP_IS_E1H(bp)) {
4452                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4453                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4454                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4455                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4456                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4457                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4458                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4459                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4460                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4461
4462                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4463                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4464         }
4465
4466         /* HC init per function */
4467         if (CHIP_IS_E1H(bp)) {
4468                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4469
4470                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4471                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4472         }
4473         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4474
4475         /* Reset PCIE errors for debug */
4476         REG_WR(bp, 0x2114, 0xffffffff);
4477         REG_WR(bp, 0x2120, 0xffffffff);
4478
4479         return 0;
4480 }
4481
4482 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4483 {
4484         int i, rc = 0;
4485
4486         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
4487            BP_FUNC(bp), load_code);
4488
4489         bp->dmae_ready = 0;
4490         mutex_init(&bp->dmae_mutex);
4491         rc = bnx2x_gunzip_init(bp);
4492         if (rc)
4493                 return rc;
4494
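        /* The cases below intentionally fall through: a COMMON load also
         * performs the PORT and FUNCTION init, and a PORT load also
         * performs the FUNCTION init (see the "no break" markers).
         */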
4495         switch (load_code) {
4496         case FW_MSG_CODE_DRV_LOAD_COMMON:
4497                 rc = bnx2x_init_common(bp);
4498                 if (rc)
4499                         goto init_hw_err;
4500                 /* no break */
4501
4502         case FW_MSG_CODE_DRV_LOAD_PORT:
4503                 bp->dmae_ready = 1;
4504                 rc = bnx2x_init_port(bp);
4505                 if (rc)
4506                         goto init_hw_err;
4507                 /* no break */
4508
4509         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4510                 bp->dmae_ready = 1;
4511                 rc = bnx2x_init_func(bp);
4512                 if (rc)
4513                         goto init_hw_err;
4514                 break;
4515
4516         default:
4517                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4518                 break;
4519         }
4520
4521         if (!BP_NOMCP(bp)) {
4522                 int func = BP_FUNC(bp);
4523
4524                 bp->fw_drv_pulse_wr_seq =
4525                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4526                                  DRV_PULSE_SEQ_MASK);
4527                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4528         }
4529
4530         /* this needs to be done before gunzip end */
4531         bnx2x_zero_def_sb(bp);
4532         for_each_queue(bp, i)
4533                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4534 #ifdef BCM_CNIC
4535         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4536 #endif
4537
4538 init_hw_err:
4539         bnx2x_gunzip_end(bp);
4540
4541         return rc;
4542 }
4543
4544 void bnx2x_free_mem(struct bnx2x *bp)
4545 {
4546
4547 #define BNX2X_PCI_FREE(x, y, size) \
4548         do { \
4549                 if (x) { \
4550                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
4551                         x = NULL; \
4552                         y = 0; \
4553                 } \
4554         } while (0)
4555
4556 #define BNX2X_FREE(x) \
4557         do { \
4558                 if (x) { \
4559                         vfree(x); \
4560                         x = NULL; \
4561                 } \
4562         } while (0)
4563
4564         int i;
4565
4566         /* fastpath */
4567         /* Common */
4568         for_each_queue(bp, i) {
4569
4570                 /* status blocks */
4571                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4572                                bnx2x_fp(bp, i, status_blk_mapping),
4573                                sizeof(struct host_status_block));
4574         }
4575         /* Rx */
4576         for_each_queue(bp, i) {
4577
4578                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4579                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4580                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4581                                bnx2x_fp(bp, i, rx_desc_mapping),
4582                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4583
4584                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4585                                bnx2x_fp(bp, i, rx_comp_mapping),
4586                                sizeof(struct eth_fast_path_rx_cqe) *
4587                                NUM_RCQ_BD);
4588
4589                 /* SGE ring */
4590                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4591                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4592                                bnx2x_fp(bp, i, rx_sge_mapping),
4593                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4594         }
4595         /* Tx */
4596         for_each_queue(bp, i) {
4597
4598                 /* fastpath tx rings: tx_buf tx_desc */
4599                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4600                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4601                                bnx2x_fp(bp, i, tx_desc_mapping),
4602                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4603         }
4604         /* end of fastpath */
4605
4606         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4607                        sizeof(struct host_def_status_block));
4608
4609         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4610                        sizeof(struct bnx2x_slowpath));
4611
4612 #ifdef BCM_CNIC
4613         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4614         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4615         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4616         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4617         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4618                        sizeof(struct host_status_block));
4619 #endif
4620         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4621
4622 #undef BNX2X_PCI_FREE
4623 #undef BNX2X_FREE
4624 }
4625
4626 int bnx2x_alloc_mem(struct bnx2x *bp)
4627 {
4628
4629 #define BNX2X_PCI_ALLOC(x, y, size) \
4630         do { \
4631                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4632                 if (x == NULL) \
4633                         goto alloc_mem_err; \
4634                 memset(x, 0, size); \
4635         } while (0)
4636
4637 #define BNX2X_ALLOC(x, size) \
4638         do { \
4639                 x = vmalloc(size); \
4640                 if (x == NULL) \
4641                         goto alloc_mem_err; \
4642                 memset(x, 0, size); \
4643         } while (0)
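/* Editor's note: BNX2X_PCI_ALLOC is for DMA-coherent memory the chip
 * accesses (status blocks, descriptor rings), while BNX2X_ALLOC uses
 * vmalloc for host-only bookkeeping arrays (the sw_rx_bd/sw_rx_page/
 * sw_tx_bd rings) that the hardware never touches.
 */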
4644
4645         int i;
4646
4647         /* fastpath */
4648         /* Common */
4649         for_each_queue(bp, i) {
4650                 bnx2x_fp(bp, i, bp) = bp;
4651
4652                 /* status blocks */
4653                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4654                                 &bnx2x_fp(bp, i, status_blk_mapping),
4655                                 sizeof(struct host_status_block));
4656         }
4657         /* Rx */
4658         for_each_queue(bp, i) {
4659
4660                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4661                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4662                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4663                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4664                                 &bnx2x_fp(bp, i, rx_desc_mapping),
4665                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4666
4667                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4668                                 &bnx2x_fp(bp, i, rx_comp_mapping),
4669                                 sizeof(struct eth_fast_path_rx_cqe) *
4670                                 NUM_RCQ_BD);
4671
4672                 /* SGE ring */
4673                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4674                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4675                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4676                                 &bnx2x_fp(bp, i, rx_sge_mapping),
4677                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4678         }
4679         /* Tx */
4680         for_each_queue(bp, i) {
4681
4682                 /* fastpath tx rings: tx_buf tx_desc */
4683                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4684                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4685                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4686                                 &bnx2x_fp(bp, i, tx_desc_mapping),
4687                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4688         }
4689         /* end of fastpath */
4690
4691         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4692                         sizeof(struct host_def_status_block));
4693
4694         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4695                         sizeof(struct bnx2x_slowpath));
4696
4697 #ifdef BCM_CNIC
4698         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4699
4700         /* allocate the searcher T2 table;
4701            we allocate 1/4 of the T1 size (64K/4 = 16K) for T2
4702            (which is not entered into the ILT) */
4703         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4704
4705         /* Initialize T2 (for 1024 connections) */
4706         for (i = 0; i < 16*1024; i += 64)
4707                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
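        /* i.e. the last 8 bytes of each 64-byte T2 entry hold the physical
         * address of the next entry, forming the searcher's free-entry
         * chain; the end of the chain is bounded by SRC_REG_LASTFREE0,
         * written in bnx2x_init_func() above.
         */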
4708
4709         /* Timer block array (8*MAX_CONN), phys uncached; for now 1024 conns */
4710         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4711
4712         /* QM queues (128*MAX_CONN) */
4713         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4714
4715         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4716                         sizeof(struct host_status_block));
4717 #endif
4718
4719         /* Slow path ring */
4720         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4721
4722         return 0;
4723
4724 alloc_mem_err:
4725         bnx2x_free_mem(bp);
4726         return -ENOMEM;
4727
4728 #undef BNX2X_PCI_ALLOC
4729 #undef BNX2X_ALLOC
4730 }
4731
4732
4733 /*
4734  * Init service functions
4735  */
4736
4737 /**
4738  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4739  *
4740  * @param bp driver descriptor
4741  * @param set set or clear an entry (1 or 0)
4742  * @param mac pointer to a buffer containing a MAC
4743  * @param cl_bit_vec bit vector of clients to register a MAC for
4744  * @param cam_offset offset in a CAM to use
4745  * @param with_bcast set broadcast MAC as well
4746  */
4747 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4748                                       u32 cl_bit_vec, u8 cam_offset,
4749                                       u8 with_bcast)
4750 {
4751         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4752         int port = BP_PORT(bp);
4753
4754         /* CAM allocation
4755          * unicasts 0-31:port0 32-63:port1
4756          * multicast 64-127:port0 128-191:port1
4757          */
4758         config->hdr.length = 1 + (with_bcast ? 1 : 0);
4759         config->hdr.offset = cam_offset;
4760         config->hdr.client_id = 0xff;
4761         config->hdr.reserved1 = 0;
4762
4763         /* primary MAC */
4764         config->config_table[0].cam_entry.msb_mac_addr =
4765                                         swab16(*(u16 *)&mac[0]);
4766         config->config_table[0].cam_entry.middle_mac_addr =
4767                                         swab16(*(u16 *)&mac[2]);
4768         config->config_table[0].cam_entry.lsb_mac_addr =
4769                                         swab16(*(u16 *)&mac[4]);
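        /* Editor's note (assumes a little-endian host): for the MAC
         * 00:11:22:33:44:55, *(u16 *)&mac[0] reads 0x1100 and swab16()
         * turns it into 0x0011, so msb/middle/lsb become
         * 0x0011/0x2233/0x4455 - the byte order the CAM expects.
         */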
4770         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4771         if (set)
4772                 config->config_table[0].target_table_entry.flags = 0;
4773         else
4774                 CAM_INVALIDATE(config->config_table[0]);
4775         config->config_table[0].target_table_entry.clients_bit_vector =
4776                                                 cpu_to_le32(cl_bit_vec);
4777         config->config_table[0].target_table_entry.vlan_id = 0;
4778
4779         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4780            (set ? "setting" : "clearing"),
4781            config->config_table[0].cam_entry.msb_mac_addr,
4782            config->config_table[0].cam_entry.middle_mac_addr,
4783            config->config_table[0].cam_entry.lsb_mac_addr);
4784
4785         /* broadcast */
4786         if (with_bcast) {
4787                 config->config_table[1].cam_entry.msb_mac_addr =
4788                         cpu_to_le16(0xffff);
4789                 config->config_table[1].cam_entry.middle_mac_addr =
4790                         cpu_to_le16(0xffff);
4791                 config->config_table[1].cam_entry.lsb_mac_addr =
4792                         cpu_to_le16(0xffff);
4793                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4794                 if (set)
4795                         config->config_table[1].target_table_entry.flags =
4796                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4797                 else
4798                         CAM_INVALIDATE(config->config_table[1]);
4799                 config->config_table[1].target_table_entry.clients_bit_vector =
4800                                                         cpu_to_le32(cl_bit_vec);
4801                 config->config_table[1].target_table_entry.vlan_id = 0;
4802         }
4803
4804         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4805                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4806                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4807 }
4808
4809 /**
4810  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4811  *
4812  * @param bp driver descriptor
4813  * @param set set or clear an entry (1 or 0)
4814  * @param mac pointer to a buffer containing a MAC
4815  * @param cl_bit_vec bit vector of clients to register a MAC for
4816  * @param cam_offset offset in a CAM to use
4817  */
4818 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4819                                        u32 cl_bit_vec, u8 cam_offset)
4820 {
4821         struct mac_configuration_cmd_e1h *config =
4822                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4823
4824         config->hdr.length = 1;
4825         config->hdr.offset = cam_offset;
4826         config->hdr.client_id = 0xff;
4827         config->hdr.reserved1 = 0;
4828
4829         /* primary MAC */
4830         config->config_table[0].msb_mac_addr =
4831                                         swab16(*(u16 *)&mac[0]);
4832         config->config_table[0].middle_mac_addr =
4833                                         swab16(*(u16 *)&mac[2]);
4834         config->config_table[0].lsb_mac_addr =
4835                                         swab16(*(u16 *)&mac[4]);
4836         config->config_table[0].clients_bit_vector =
4837                                         cpu_to_le32(cl_bit_vec);
4838         config->config_table[0].vlan_id = 0;
4839         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4840         if (set)
4841                 config->config_table[0].flags = BP_PORT(bp);
4842         else
4843                 config->config_table[0].flags =
4844                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4845
4846         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4847            (set ? "setting" : "clearing"),
4848            config->config_table[0].msb_mac_addr,
4849            config->config_table[0].middle_mac_addr,
4850            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4851
4852         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4853                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4854                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4855 }
4856
4857 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4858                              int *state_p, int poll)
4859 {
4860         /* can take a while if any port is running */
4861         int cnt = 5000;
4862
4863         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4864            poll ? "polling" : "waiting", state, idx);
4865
4866         might_sleep();
4867         while (cnt--) {
4868                 if (poll) {
4869                         bnx2x_rx_int(bp->fp, 10);
4870                         /* if the index is different from 0,
4871                          * the reply for some commands will
4872                          * be on the non-default queue
4873                          */
4874                         if (idx)
4875                                 bnx2x_rx_int(&bp->fp[idx], 10);
4876                 }
4877
4878                 mb(); /* state is changed by bnx2x_sp_event() */
4879                 if (*state_p == state) {
4880 #ifdef BNX2X_STOP_ON_ERROR
4881                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4882 #endif
4883                         return 0;
4884                 }
4885
4886                 msleep(1);
4887
4888                 if (bp->panic)
4889                         return -EIO;
4890         }
4891
4892         /* timeout! */
4893         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4894                   poll ? "polling" : "waiting", state, idx);
4895 #ifdef BNX2X_STOP_ON_ERROR
4896         bnx2x_panic();
4897 #endif
4898
4899         return -EBUSY;
4900 }
4901
4902 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4903 {
4904         bp->set_mac_pending++;
4905         smp_wmb();
4906
4907         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4908                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
4909
4910         /* Wait for a completion */
4911         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4912 }
4913
4914 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4915 {
4916         bp->set_mac_pending++;
4917         smp_wmb();
4918
4919         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4920                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4921                                   1);
4922
4923         /* Wait for a completion */
4924         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4925 }
4926
4927 #ifdef BCM_CNIC
4928 /**
4929  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4930  * MAC(s). This function will wait until the ramrod completion
4931  * returns.
4932  *
4933  * @param bp driver handle
4934  * @param set set or clear the CAM entry
4935  *
4936  * @return 0 if success, -ENODEV if ramrod doesn't return.
4937  */
4938 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4939 {
4940         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4941
4942         bp->set_mac_pending++;
4943         smp_wmb();
4944
4945         /* Send a SET_MAC ramrod */
4946         if (CHIP_IS_E1(bp))
4947                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4948                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4949                                   1);
4950         else
4951                 /* CAM allocation for E1H
4952                  * unicasts: by func number
4953                  * multicast: 20+FUNC*20, 20 each
4954                  */
4955                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4956                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4957
4958         /* Wait for a completion when setting */
4959         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4960
4961         return 0;
4962 }
4963 #endif
4964
4965 int bnx2x_setup_leading(struct bnx2x *bp)
4966 {
4967         int rc;
4968
4969         /* reset IGU state */
4970         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4971
4972         /* SETUP ramrod */
4973         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4974
4975         /* Wait for completion */
4976         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4977
4978         return rc;
4979 }
4980
4981 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4982 {
4983         struct bnx2x_fastpath *fp = &bp->fp[index];
4984
4985         /* reset IGU state */
4986         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4987
4988         /* SETUP ramrod */
4989         fp->state = BNX2X_FP_STATE_OPENING;
4990         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4991                       fp->cl_id, 0);
4992
4993         /* Wait for completion */
4994         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4995                                  &(fp->state), 0);
4996 }
4997
4998
4999 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5000 {
5001
5002         switch (bp->multi_mode) {
5003         case ETH_RSS_MODE_DISABLED:
5004                 bp->num_queues = 1;
5005                 break;
5006
5007         case ETH_RSS_MODE_REGULAR:
5008                 if (num_queues)
5009                         bp->num_queues = min_t(u32, num_queues,
5010                                                   BNX2X_MAX_QUEUES(bp));
5011                 else
5012                         bp->num_queues = min_t(u32, num_online_cpus(),
5013                                                   BNX2X_MAX_QUEUES(bp));
5014                 break;
5015
5016
5017         default:
5018                 bp->num_queues = 1;
5019                 break;
5020         }
5021 }
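/* Editor's note: e.g. with bp->multi_mode == ETH_RSS_MODE_REGULAR, the
 * num_queues module parameter left at 0 and 8 CPUs online, this picks
 * bp->num_queues = min(8, BNX2X_MAX_QUEUES(bp)).
 */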
5022
5023
5024
5025 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5026 {
5027         struct bnx2x_fastpath *fp = &bp->fp[index];
5028         int rc;
5029
5030         /* halt the connection */
5031         fp->state = BNX2X_FP_STATE_HALTING;
5032         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5033
5034         /* Wait for completion */
5035         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5036                                &(fp->state), 1);
5037         if (rc) /* timeout */
5038                 return rc;
5039
5040         /* delete cfc entry */
5041         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5042
5043         /* Wait for completion */
5044         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5045                                &(fp->state), 1);
5046         return rc;
5047 }
5048
5049 static int bnx2x_stop_leading(struct bnx2x *bp)
5050 {
5051         __le16 dsb_sp_prod_idx;
5052         /* if the other port is handling traffic,
5053            this can take a lot of time */
5054         int cnt = 500;
5055         int rc;
5056
5057         might_sleep();
5058
5059         /* Send HALT ramrod */
5060         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5061         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5062
5063         /* Wait for completion */
5064         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5065                                &(bp->fp[0].state), 1);
5066         if (rc) /* timeout */
5067                 return rc;
5068
5069         dsb_sp_prod_idx = *bp->dsb_sp_prod;
5070
5071         /* Send PORT_DELETE ramrod */
5072         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5073
5074         /* Wait for the completion to arrive on the default status block.
5075            We are going to reset the chip anyway,
5076            so there is not much to do if this times out.
5077          */
5078         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5079                 if (!cnt) {
5080                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5081                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5082                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
5083 #ifdef BNX2X_STOP_ON_ERROR
5084                         bnx2x_panic();
5085 #endif
5086                         rc = -EBUSY;
5087                         break;
5088                 }
5089                 cnt--;
5090                 msleep(1);
5091                 rmb(); /* Refresh the dsb_sp_prod */
5092         }
5093         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5094         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5095
5096         return rc;
5097 }
5098
5099 static void bnx2x_reset_func(struct bnx2x *bp)
5100 {
5101         int port = BP_PORT(bp);
5102         int func = BP_FUNC(bp);
5103         int base, i;
5104
5105         /* Configure IGU */
5106         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5107         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5108
5109 #ifdef BCM_CNIC
5110         /* Disable Timer scan */
5111         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5112         /*
5113          * Wait for at least 10ms and up to 2 seconds for the timer
5114          * scan to complete
5115          */
5116         for (i = 0; i < 200; i++) {
5117                 msleep(10);
5118                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5119                         break;
5120         }
5121 #endif
5122         /* Clear ILT */
5123         base = FUNC_ILT_BASE(func);
5124         for (i = base; i < base + ILT_PER_FUNC; i++)
5125                 bnx2x_ilt_wr(bp, i, 0);
5126 }
5127
5128 static void bnx2x_reset_port(struct bnx2x *bp)
5129 {
5130         int port = BP_PORT(bp);
5131         u32 val;
5132
5133         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5134
5135         /* Do not rcv packets to BRB */
5136         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5137         /* Do not direct rcv packets that are not for MCP to the BRB */
5138         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5139                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5140
5141         /* Configure AEU */
5142         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5143
5144         msleep(100);
5145         /* Check for BRB port occupancy */
5146         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5147         if (val)
5148                 DP(NETIF_MSG_IFDOWN,
5149                    "BRB1 is not empty  %d blocks are occupied\n", val);
5150
5151         /* TODO: Close Doorbell port? */
5152 }
5153
5154 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5155 {
5156         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5157            BP_FUNC(bp), reset_code);
5158
5159         switch (reset_code) {
5160         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5161                 bnx2x_reset_port(bp);
5162                 bnx2x_reset_func(bp);
5163                 bnx2x_reset_common(bp);
5164                 break;
5165
5166         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5167                 bnx2x_reset_port(bp);
5168                 bnx2x_reset_func(bp);
5169                 break;
5170
5171         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5172                 bnx2x_reset_func(bp);
5173                 break;
5174
5175         default:
5176                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5177                 break;
5178         }
5179 }
5180
5181 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5182 {
5183         int port = BP_PORT(bp);
5184         u32 reset_code = 0;
5185         int i, cnt, rc;
5186
5187         /* Wait until tx fastpath tasks complete */
5188         for_each_queue(bp, i) {
5189                 struct bnx2x_fastpath *fp = &bp->fp[i];
5190
5191                 cnt = 1000;
5192                 while (bnx2x_has_tx_work_unload(fp)) {
5193
5194                         bnx2x_tx_int(fp);
5195                         if (!cnt) {
5196                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
5197                                           i);
5198 #ifdef BNX2X_STOP_ON_ERROR
5199                                 bnx2x_panic();
5200                                 return; /* bnx2x_chip_cleanup() is void */
5201 #else
5202                                 break;
5203 #endif
5204                         }
5205                         cnt--;
5206                         msleep(1);
5207                 }
5208         }
5209         /* Give HW time to discard old tx messages */
5210         msleep(1);
5211
5212         if (CHIP_IS_E1(bp)) {
5213                 struct mac_configuration_cmd *config =
5214                                                 bnx2x_sp(bp, mcast_config);
5215
5216                 bnx2x_set_eth_mac_addr_e1(bp, 0);
5217
5218                 for (i = 0; i < config->hdr.length; i++)
5219                         CAM_INVALIDATE(config->config_table[i]);
5220
5221                 config->hdr.length = i;
5222                 if (CHIP_REV_IS_SLOW(bp))
5223                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5224                 else
5225                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5226                 config->hdr.client_id = bp->fp->cl_id;
5227                 config->hdr.reserved1 = 0;
5228
5229                 bp->set_mac_pending++;
5230                 smp_wmb();
5231
5232                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5233                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5234                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5235
5236         } else { /* E1H */
5237                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5238
5239                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5240
5241                 for (i = 0; i < MC_HASH_SIZE; i++)
5242                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5243
5244                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5245         }
5246 #ifdef BCM_CNIC
5247         /* Clear iSCSI L2 MAC */
5248         mutex_lock(&bp->cnic_mutex);
5249         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5250                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5251                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5252         }
5253         mutex_unlock(&bp->cnic_mutex);
5254 #endif
5255
5256         if (unload_mode == UNLOAD_NORMAL)
5257                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5258
5259         else if (bp->flags & NO_WOL_FLAG)
5260                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5261
5262         else if (bp->wol) {
5263                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5264                 u8 *mac_addr = bp->dev->dev_addr;
5265                 u32 val;
5266                 /* The MAC address is written to entries 1-4 to
5267                    preserve entry 0, which is used by the PMF */
5268                 u8 entry = (BP_E1HVN(bp) + 1)*8;
5269
5270                 val = (mac_addr[0] << 8) | mac_addr[1];
5271                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5272
5273                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5274                       (mac_addr[4] << 8) | mac_addr[5];
5275                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
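                /* Editor's note - worked example: for VN 1 and MAC
                 * 00:11:22:33:44:55, entry = (1 + 1)*8 = 16 (CAM match
                 * entry 2, 8 bytes per entry), and the two writes above
                 * carry 0x0011 and 0x22334455 respectively.
                 */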
5276
5277                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5278
5279         } else
5280                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5281
5282         /* Close multi and leading connections;
5283            completions for ramrods are collected synchronously */
5284         for_each_nondefault_queue(bp, i)
5285                 if (bnx2x_stop_multi(bp, i))
5286                         goto unload_error;
5287
5288         rc = bnx2x_stop_leading(bp);
5289         if (rc) {
5290                 BNX2X_ERR("Stop leading failed!\n");
5291 #ifdef BNX2X_STOP_ON_ERROR
5292                 return; /* bnx2x_chip_cleanup() is void */
5293 #else
5294                 goto unload_error;
5295 #endif
5296         }
5297
5298 unload_error:
5299         if (!BP_NOMCP(bp))
5300                 reset_code = bnx2x_fw_command(bp, reset_code);
5301         else {
5302                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5303                    load_count[0], load_count[1], load_count[2]);
5304                 load_count[0]--;
5305                 load_count[1 + port]--;
5306                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5307                    load_count[0], load_count[1], load_count[2]);
5308                 if (load_count[0] == 0)
5309                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5310                 else if (load_count[1 + port] == 0)
5311                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5312                 else
5313                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5314         }
5315
5316         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5317             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5318                 bnx2x__link_reset(bp);
5319
5320         /* Reset the chip */
5321         bnx2x_reset_chip(bp, reset_code);
5322
5323         /* Report UNLOAD_DONE to MCP */
5324         if (!BP_NOMCP(bp))
5325                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5326
5327 }
5328
5329 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5330 {
5331         u32 val;
5332
5333         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5334
5335         if (CHIP_IS_E1(bp)) {
5336                 int port = BP_PORT(bp);
5337                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5338                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
5339
5340                 val = REG_RD(bp, addr);
5341                 val &= ~(0x300);
5342                 REG_WR(bp, addr, val);
5343         } else if (CHIP_IS_E1H(bp)) {
5344                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5345                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5346                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5347                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5348         }
5349 }
5350
5351
5352 /* Close gates #2, #3 and #4: */
5353 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5354 {
5355         u32 val, addr;
5356
5357         /* Gates #2 and #4a are closed/opened for "not E1" only */
5358         if (!CHIP_IS_E1(bp)) {
5359                 /* #4 */
5360                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5361                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5362                        close ? (val | 0x1) : (val & (~(u32)1)));
5363                 /* #2 */
5364                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5365                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5366                        close ? (val | 0x1) : (val & (~(u32)1)));
5367         }
5368
5369         /* #3 */
5370         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5371         val = REG_RD(bp, addr);
5372         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
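        /* Note the inverted polarity: for gate #3 the HC_CONFIG bit is an
         * enable, so it is cleared to close the gate, whereas the PXP
         * "discard" bits above are set to close gates #2 and #4.
         */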
5373
5374         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5375                 close ? "closing" : "opening");
5376         mmiowb();
5377 }
5378
5379 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
5380
5381 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5382 {
5383         /* Do some magic... */
5384         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5385         *magic_val = val & SHARED_MF_CLP_MAGIC;
5386         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5387 }
5388
5389 /* Restore the value of the `magic' bit.
5390  *
5391  * @param bp driver handle.
5392  * @param magic_val Old value of the `magic' bit.
5393  */
5394 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5395 {
5396         /* Restore the `magic' bit value... */
5400         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5401         MF_CFG_WR(bp, shared_mf_config.clp_mb,
5402                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5403 }
5404
5405 /* Prepares for MCP reset: takes care of CLP configurations.
5406  *
5407  * @param bp
5408  * @param magic_val Old value of 'magic' bit.
5409  */
5410 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5411 {
5412         u32 shmem;
5413         u32 validity_offset;
5414
5415         DP(NETIF_MSG_HW, "Starting\n");
5416
5417         /* Set `magic' bit in order to save MF config */
5418         if (!CHIP_IS_E1(bp))
5419                 bnx2x_clp_reset_prep(bp, magic_val);
5420
5421         /* Get shmem offset */
5422         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5423         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5424
5425         /* Clear validity map flags */
5426         if (shmem > 0)
5427                 REG_WR(bp, shmem + validity_offset, 0);
5428 }
5429
5430 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5431 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
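/* Editor's note: the wait loop in bnx2x_reset_mcp_comp() below thus runs
 * up to MCP_TIMEOUT/MCP_ONE_TIMEOUT = 50 iterations of bnx2x_mcp_wait_one(),
 * ~100ms each (10x longer per iteration on emulation/FPGA).
 */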
5432
5433 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5434  * depending on the HW type.
5435  *
5436  * @param bp
5437  */
5438 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5439 {
5440         /* special handling for emulation and FPGA,
5441            wait 10 times longer */
5442         if (CHIP_REV_IS_SLOW(bp))
5443                 msleep(MCP_ONE_TIMEOUT*10);
5444         else
5445                 msleep(MCP_ONE_TIMEOUT);
5446 }
5447
5448 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5449 {
5450         u32 shmem, cnt, validity_offset, val;
5451         int rc = 0;
5452
5453         msleep(100);
5454
5455         /* Get shmem offset */
5456         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5457         if (shmem == 0) {
5458                 BNX2X_ERR("Shmem 0 return failure\n");
5459                 rc = -ENOTTY;
5460                 goto exit_lbl;
5461         }
5462
5463         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5464
5465         /* Wait for MCP to come up */
5466         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5467                 /* TBD: it would be best to check the validity map of the
5468                  * last port; currently this checks port 0.
5469                  */
5470                 val = REG_RD(bp, shmem + validity_offset);
5471                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5472                    shmem + validity_offset, val);
5473
5474                 /* check that shared memory is valid. */
5475                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5476                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5477                         break;
5478
5479                 bnx2x_mcp_wait_one(bp);
5480         }
5481
5482         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5483
5484         /* Check that shared memory is valid. This indicates that MCP is up. */
5485         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5486             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5487                 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5488                 rc = -ENOTTY;
5489                 goto exit_lbl;
5490         }
5491
5492 exit_lbl:
5493         /* Restore the `magic' bit value */
5494         if (!CHIP_IS_E1(bp))
5495                 bnx2x_clp_reset_done(bp, magic_val);
5496
5497         return rc;
5498 }
5499
5500 static void bnx2x_pxp_prep(struct bnx2x *bp)
5501 {
5502         if (!CHIP_IS_E1(bp)) {
5503                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5504                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5505                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5506                 mmiowb();
5507         }
5508 }
5509
5510 /*
5511  * Reset the whole chip except for:
5512  *      - PCIE core
5513  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5514  *              one reset bit)
5515  *      - IGU
5516  *      - MISC (including AEU)
5517  *      - GRC
5518  *      - RBCN, RBCP
5519  */
5520 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5521 {
5522         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5523
5524         not_reset_mask1 =
5525                 MISC_REGISTERS_RESET_REG_1_RST_HC |
5526                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5527                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5528
5529         not_reset_mask2 =
5530                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5531                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5532                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5533                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5534                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5535                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5536                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5537                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5538
5539         reset_mask1 = 0xffffffff;
5540
5541         if (CHIP_IS_E1(bp))
5542                 reset_mask2 = 0xffff;
5543         else
5544                 reset_mask2 = 0x1ffff;
5545
5546         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5547                reset_mask1 & (~not_reset_mask1));
5548         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5549                reset_mask2 & (~not_reset_mask2));
5550
5551         barrier();
5552         mmiowb();
5553
5554         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5555         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5556         mmiowb();
5557 }
5558
5559 static int bnx2x_process_kill(struct bnx2x *bp)
5560 {
5561         int cnt = 1000;
5562         u32 val = 0;
5563         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5564
5565
5566         /* Empty the Tetris buffer, wait for 1s */
5567         do {
5568                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5569                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5570                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5571                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5572                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5573                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5574                     ((port_is_idle_0 & 0x1) == 0x1) &&
5575                     ((port_is_idle_1 & 0x1) == 0x1) &&
5576                     (pgl_exp_rom2 == 0xffffffff))
5577                         break;
5578                 msleep(1);
5579         } while (--cnt > 0); /* pre-decrement so a late success still leaves cnt > 0 */
5580
5581         if (cnt <= 0) {
5582                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5583                           " are still"
5584                           " outstanding read requests after 1s!\n");
5585                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5586                           " port_is_idle_0=0x%08x,"
5587                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5588                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5589                           pgl_exp_rom2);
5590                 return -EAGAIN;
5591         }
5592
5593         barrier();
5594
5595         /* Close gates #2, #3 and #4 */
5596         bnx2x_set_234_gates(bp, true);
5597
5598         /* TBD: Indicate that "process kill" is in progress to MCP */
5599
5600         /* Clear "unprepared" bit */
5601         REG_WR(bp, MISC_REG_UNPREPARED, 0);
5602         barrier();
5603
5604         /* Make sure all is written to the chip before the reset */
5605         mmiowb();
5606
5607         /* Wait for 1ms to empty GLUE and PCI-E core queues,
5608          * PSWHST, GRC and PSWRD Tetris buffer.
5609          */
5610         msleep(1);
5611
5612         /* Prepare to chip reset: */
5613         /* MCP */
5614         bnx2x_reset_mcp_prep(bp, &val);
5615
5616         /* PXP */
5617         bnx2x_pxp_prep(bp);
5618         barrier();
5619
5620         /* reset the chip */
5621         bnx2x_process_kill_chip_reset(bp);
5622         barrier();
5623
5624         /* Recover after reset: */
5625         /* MCP */
5626         if (bnx2x_reset_mcp_comp(bp, val))
5627                 return -EAGAIN;
5628
5629         /* PXP */
5630         bnx2x_pxp_prep(bp);
5631
5632         /* Open the gates #2, #3 and #4 */
5633         bnx2x_set_234_gates(bp, false);
5634
5635         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
5636          * reset state, re-enable attentions. */
5637
5638         return 0;
5639 }
5640
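/* Executed by the recovery leader only: run "process kill" and, on
 * success, clear the global "reset in progress" flag. Leadership is
 * released on exit in either case.
 */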
5641 static int bnx2x_leader_reset(struct bnx2x *bp)
5642 {
5643         int rc = 0;
5644         /* Try to recover after the failure */
5645         if (bnx2x_process_kill(bp)) {
5646                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5647                        bp->dev->name);
5648                 rc = -EAGAIN;
5649                 goto exit_leader_reset;
5650         }
5651
5652         /* Clear "reset is in progress" bit and update the driver state */
5653         bnx2x_set_reset_done(bp);
5654         bp->recovery_state = BNX2X_RECOVERY_DONE;
5655
5656 exit_leader_reset:
5657         bp->is_leader = 0;
5658         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5659         smp_wmb();
5660         return rc;
5661 }
5662
5663 /* Assumption: runs under rtnl lock. This together with the fact
5664  * that it's called only from bnx2x_reset_task() ensure that it
5665  * will never be called when netif_running(bp->dev) is false.
5666  */
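/* Recovery state machine:
 *  INIT - take the leadership lock if it's free, unload the NIC and
 *         move to WAIT;
 *  WAIT - the leader waits for all other functions to unload, then
 *         resets the chip and reloads; a non-leader waits for the
 *         leader to finish (or takes over the leadership) and reloads.
 */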
5667 static void bnx2x_parity_recover(struct bnx2x *bp)
5668 {
5669         DP(NETIF_MSG_HW, "Handling parity\n");
5670         while (1) {
5671                 switch (bp->recovery_state) {
5672                 case BNX2X_RECOVERY_INIT:
5673                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5674                         /* Try to get a LEADER_LOCK HW lock */
5675                         if (bnx2x_trylock_hw_lock(bp,
5676                                 HW_LOCK_RESOURCE_RESERVED_08))
5677                                 bp->is_leader = 1;
5678
5679                         /* Stop the driver */
5680                         /* If the interface has been removed - return */
5681                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5682                                 return;
5683
5684                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
5685                         /* Ensure "is_leader" and "recovery_state"
5686                          *  update values are seen on other CPUs
5687                          */
5688                         smp_wmb();
5689                         break;
5690
5691                 case BNX2X_RECOVERY_WAIT:
5692                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5693                         if (bp->is_leader) {
5694                                 u32 load_counter = bnx2x_get_load_cnt(bp);
5695                                 if (load_counter) {
5696                                         /* Wait until all other functions go
5697                                          * down.
5698                                          */
5699                                         schedule_delayed_work(&bp->reset_task,
5700                                                                 HZ/10);
5701                                         return;
5702                                 } else {
5703                                         /* All other functions went down -
5704                                          * try to bring the chip back to
5705                                          * normal. Either way this is an
5706                                          * exit point for the leader.
5707                                          */
5708                                         if (bnx2x_leader_reset(bp) ||
5709                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
5710                                                 printk(KERN_ERR"%s: Recovery "
5711                                                 "has failed. Power cycle is "
5712                                                 "needed.\n", bp->dev->name);
5713                                                 /* Disconnect this device */
5714                                                 netif_device_detach(bp->dev);
5715                                                 /* Block ifup for all function
5716                                                  * of this ASIC until
5717                                                  * "process kill" or power
5718                                                  * cycle.
5719                                                  */
5720                                                 bnx2x_set_reset_in_progress(bp);
5721                                                 /* Shut down the power */
5722                                                 bnx2x_set_power_state(bp,
5723                                                                 PCI_D3hot);
5724                                                 return;
5725                                         }
5726
5727                                         return;
5728                                 }
5729                         } else { /* non-leader */
5730                                 if (!bnx2x_reset_is_done(bp)) {
5731                                         /* Try to get the LEADER_LOCK HW lock,
5732                                          * since the former leader may have
5733                                          * been unloaded by the user or
5734                                          * released leadership for some other
5735                                          * reason.
5736                                          */
5737                                         if (bnx2x_trylock_hw_lock(bp,
5738                                             HW_LOCK_RESOURCE_RESERVED_08)) {
5739                                                 /* I'm the leader now! Re-run
5740                                                  * the state machine.
5741                                                  */
5742                                                 bp->is_leader = 1;
5743                                                 break;
5744                                         }
5745
5746                                         schedule_delayed_work(&bp->reset_task,
5747                                                                 HZ/10);
5748                                         return;
5749
5750                                 } else { /* A leader has completed
5751                                           * the "process kill". It's an exit
5752                                           * point for a non-leader.
5753                                           */
5754                                         bnx2x_nic_load(bp, LOAD_NORMAL);
5755                                         bp->recovery_state =
5756                                                 BNX2X_RECOVERY_DONE;
5757                                         smp_wmb();
5758                                         return;
5759                                 }
5760                         }
5761                 default:
5762                         return;
5763                 }
5764         }
5765 }
5766
5767 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
5768  * scheduled on a generic workqueue in order to prevent a deadlock.
5769  */
5770 static void bnx2x_reset_task(struct work_struct *work)
5771 {
5772         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5773
5774 #ifdef BNX2X_STOP_ON_ERROR
5775         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5776                   " so reset not done to allow debug dump,\n"
5777                   " you will need to reboot when done\n");
5778         return;
5779 #endif
5780
5781         rtnl_lock();
5782
5783         if (!netif_running(bp->dev))
5784                 goto reset_task_exit;
5785
5786         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5787                 bnx2x_parity_recover(bp);
5788         else {
5789                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5790                 bnx2x_nic_load(bp, LOAD_NORMAL);
5791         }
5792
5793 reset_task_exit:
5794         rtnl_unlock();
5795 }
5796
5797 /* end of nic load/unload */
5798
5799 /*
5800  * Init service functions
5801  */
5802
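/* Map a function index to its PXP2 "pretend" register. Writing a
 * function number to this register makes the function's subsequent
 * GRC accesses appear to come from that function.
 */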
5803 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5804 {
5805         switch (func) {
5806         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5807         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5808         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5809         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5810         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5811         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5812         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5813         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5814         default:
5815                 BNX2X_ERR("Unsupported function index: %d\n", func);
5816                 return (u32)(-1);
5817         }
5818 }
5819
5820 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5821 {
5822         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5823
5824         /* Flush all outstanding writes */
5825         mmiowb();
5826
5827         /* Pretend to be function 0 */
5828         REG_WR(bp, reg, 0);
5829         /* Flush the GRC transaction (in the chip) */
5830         new_val = REG_RD(bp, reg);
5831         if (new_val != 0) {
5832                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5833                           new_val);
5834                 BUG();
5835         }
5836
5837         /* From now we are in the "like-E1" mode */
5838         bnx2x_int_disable(bp);
5839
5840         /* Flush all outstanding writes */
5841         mmiowb();
5842
5843         /* Restore the original function settings */
5844         REG_WR(bp, reg, orig_func);
5845         new_val = REG_RD(bp, reg);
5846         if (new_val != orig_func) {
5847                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5848                           orig_func, new_val);
5849                 BUG();
5850         }
5851 }
5852
5853 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5854 {
5855         if (CHIP_IS_E1H(bp))
5856                 bnx2x_undi_int_disable_e1h(bp, func);
5857         else
5858                 bnx2x_int_disable(bp);
5859 }
5860
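/* A pre-boot (UNDI) driver may have left the chip initialized. Detect
 * this by the normal doorbell CID offset (0x7), gracefully unload the
 * UNDI driver on both ports via the MCP and reset the device so that
 * this driver starts from a clean state.
 */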
5861 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5862 {
5863         u32 val;
5864
5865         /* Check if there is any driver already loaded */
5866         val = REG_RD(bp, MISC_REG_UNPREPARED);
5867         if (val == 0x1) {
5868                 /* Check if it is the UNDI driver:
5869                  * UNDI initializes the CID offset for the normal doorbell to 0x7
5870                  */
5871                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5872                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5873                 if (val == 0x7) {
5874                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5875                         /* save our func */
5876                         int func = BP_FUNC(bp);
5877                         u32 swap_en;
5878                         u32 swap_val;
5879
5880                         /* clear the UNDI indication */
5881                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5882
5883                         BNX2X_DEV_INFO("UNDI is active! Resetting device\n");
5884
5885                         /* try to unload UNDI on port 0 */
5886                         bp->func = 0;
5887                         bp->fw_seq =
5888                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5889                                 DRV_MSG_SEQ_NUMBER_MASK);
5890                         reset_code = bnx2x_fw_command(bp, reset_code);
5891
5892                         /* if UNDI is loaded on the other port */
5893                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5894
5895                                 /* send "DONE" for previous unload */
5896                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5897
5898                                 /* unload UNDI on port 1 */
5899                                 bp->func = 1;
5900                                 bp->fw_seq =
5901                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5902                                         DRV_MSG_SEQ_NUMBER_MASK);
5903                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5904
5905                                 bnx2x_fw_command(bp, reset_code);
5906                         }
5907
5908                         /* now it's safe to release the lock */
5909                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5910
5911                         bnx2x_undi_int_disable(bp, func);
5912
5913                         /* close input traffic and wait for it to drain */
5914                         /* Do not rcv packets to BRB */
5915                         REG_WR(bp,
5916                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5917                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5918                         /* Do not direct rcv packets that are not for MCP to
5919                          * the BRB */
5920                         REG_WR(bp,
5921                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5922                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5923                         /* clear AEU */
5924                         REG_WR(bp,
5925                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5926                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5927                         msleep(10);
5928
5929                         /* save NIG port swap info */
5930                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5931                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5932                         /* reset device */
5933                         REG_WR(bp,
5934                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5935                                0xd3ffffff);
5936                         REG_WR(bp,
5937                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5938                                0x1403);
5939                         /* take the NIG out of reset and restore swap values */
5940                         REG_WR(bp,
5941                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5942                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
5943                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5944                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5945
5946                         /* send unload done to the MCP */
5947                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5948
5949                         /* restore our func and fw_seq */
5950                         bp->func = func;
5951                         bp->fw_seq =
5952                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5953                                 DRV_MSG_SEQ_NUMBER_MASK);
5954
5955                 } else
5956                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5957         }
5958 }
5959
5960 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5961 {
5962         u32 val, val2, val3, val4, id;
5963         u16 pmc;
5964
5965         /* Get the chip revision id and number. */
5966         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5967         val = REG_RD(bp, MISC_REG_CHIP_NUM);
5968         id = ((val & 0xffff) << 16);
5969         val = REG_RD(bp, MISC_REG_CHIP_REV);
5970         id |= ((val & 0xf) << 12);
5971         val = REG_RD(bp, MISC_REG_CHIP_METAL);
5972         id |= ((val & 0xff) << 4);
5973         val = REG_RD(bp, MISC_REG_BOND_ID);
5974         id |= (val & 0xf);
5975         bp->common.chip_id = id;
5976         bp->link_params.chip_id = bp->common.chip_id;
5977         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5978
5979         val = (REG_RD(bp, 0x2874) & 0x55);
5980         if ((bp->common.chip_id & 0x1) ||
5981             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5982                 bp->flags |= ONE_PORT_FLAG;
5983                 BNX2X_DEV_INFO("single port device\n");
5984         }
5985
5986         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5987         bp->common.flash_size = (NVRAM_1MB_SIZE <<
5988                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
5989         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5990                        bp->common.flash_size, bp->common.flash_size);
5991
5992         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5993         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5994         bp->link_params.shmem_base = bp->common.shmem_base;
5995         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
5996                        bp->common.shmem_base, bp->common.shmem2_base);
5997
5998         if (!bp->common.shmem_base ||
5999             (bp->common.shmem_base < 0xA0000) ||
6000             (bp->common.shmem_base >= 0xC0000)) {
6001                 BNX2X_DEV_INFO("MCP not active\n");
6002                 bp->flags |= NO_MCP_FLAG;
6003                 return;
6004         }
6005
6006         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6007         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6008                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6009                 BNX2X_ERROR("BAD MCP validity signature\n");
6010
6011         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6012         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6013
6014         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6015                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6016                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6017
6018         bp->link_params.feature_config_flags = 0;
6019         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6020         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6021                 bp->link_params.feature_config_flags |=
6022                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6023         else
6024                 bp->link_params.feature_config_flags &=
6025                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6026
6027         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6028         bp->common.bc_ver = val;
6029         BNX2X_DEV_INFO("bc_ver %X\n", val);
6030         if (val < BNX2X_BC_VER) {
6031                 /* for now only warn;
6032                  * later we might need to enforce this */
6033                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6034                             "please upgrade BC\n", BNX2X_BC_VER, val);
6035         }
6036         bp->link_params.feature_config_flags |=
6037                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6038                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6039
6040         if (BP_E1HVN(bp) == 0) {
6041                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6042                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6043         } else {
6044                 /* no WOL capability for E1HVN != 0 */
6045                 bp->flags |= NO_WOL_FLAG;
6046         }
6047         BNX2X_DEV_INFO("%sWoL capable\n",
6048                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
6049
6050         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6051         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6052         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6053         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6054
6055         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6056                  val, val2, val3, val4);
6057 }
6058
6059 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6060                                                     u32 switch_cfg)
6061 {
6062         int port = BP_PORT(bp);
6063         u32 ext_phy_type;
6064
6065         switch (switch_cfg) {
6066         case SWITCH_CFG_1G:
6067                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6068
6069                 ext_phy_type =
6070                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6071                 switch (ext_phy_type) {
6072                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6073                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6074                                        ext_phy_type);
6075
6076                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6077                                                SUPPORTED_10baseT_Full |
6078                                                SUPPORTED_100baseT_Half |
6079                                                SUPPORTED_100baseT_Full |
6080                                                SUPPORTED_1000baseT_Full |
6081                                                SUPPORTED_2500baseX_Full |
6082                                                SUPPORTED_TP |
6083                                                SUPPORTED_FIBRE |
6084                                                SUPPORTED_Autoneg |
6085                                                SUPPORTED_Pause |
6086                                                SUPPORTED_Asym_Pause);
6087                         break;
6088
6089                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6090                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6091                                        ext_phy_type);
6092
6093                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6094                                                SUPPORTED_10baseT_Full |
6095                                                SUPPORTED_100baseT_Half |
6096                                                SUPPORTED_100baseT_Full |
6097                                                SUPPORTED_1000baseT_Full |
6098                                                SUPPORTED_TP |
6099                                                SUPPORTED_FIBRE |
6100                                                SUPPORTED_Autoneg |
6101                                                SUPPORTED_Pause |
6102                                                SUPPORTED_Asym_Pause);
6103                         break;
6104
6105                 default:
6106                         BNX2X_ERR("NVRAM config error. "
6107                                   "BAD SerDes ext_phy_config 0x%x\n",
6108                                   bp->link_params.ext_phy_config);
6109                         return;
6110                 }
6111
6112                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6113                                            port*0x10);
6114                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6115                 break;
6116
6117         case SWITCH_CFG_10G:
6118                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6119
6120                 ext_phy_type =
6121                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6122                 switch (ext_phy_type) {
6123                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6124                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6125                                        ext_phy_type);
6126
6127                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6128                                                SUPPORTED_10baseT_Full |
6129                                                SUPPORTED_100baseT_Half |
6130                                                SUPPORTED_100baseT_Full |
6131                                                SUPPORTED_1000baseT_Full |
6132                                                SUPPORTED_2500baseX_Full |
6133                                                SUPPORTED_10000baseT_Full |
6134                                                SUPPORTED_TP |
6135                                                SUPPORTED_FIBRE |
6136                                                SUPPORTED_Autoneg |
6137                                                SUPPORTED_Pause |
6138                                                SUPPORTED_Asym_Pause);
6139                         break;
6140
6141                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6142                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6143                                        ext_phy_type);
6144
6145                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6146                                                SUPPORTED_1000baseT_Full |
6147                                                SUPPORTED_FIBRE |
6148                                                SUPPORTED_Autoneg |
6149                                                SUPPORTED_Pause |
6150                                                SUPPORTED_Asym_Pause);
6151                         break;
6152
6153                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6154                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6155                                        ext_phy_type);
6156
6157                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6158                                                SUPPORTED_2500baseX_Full |
6159                                                SUPPORTED_1000baseT_Full |
6160                                                SUPPORTED_FIBRE |
6161                                                SUPPORTED_Autoneg |
6162                                                SUPPORTED_Pause |
6163                                                SUPPORTED_Asym_Pause);
6164                         break;
6165
6166                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6167                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6168                                        ext_phy_type);
6169
6170                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6171                                                SUPPORTED_FIBRE |
6172                                                SUPPORTED_Pause |
6173                                                SUPPORTED_Asym_Pause);
6174                         break;
6175
6176                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6177                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6178                                        ext_phy_type);
6179
6180                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6181                                                SUPPORTED_1000baseT_Full |
6182                                                SUPPORTED_FIBRE |
6183                                                SUPPORTED_Pause |
6184                                                SUPPORTED_Asym_Pause);
6185                         break;
6186
6187                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6188                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6189                                        ext_phy_type);
6190
6191                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6192                                                SUPPORTED_1000baseT_Full |
6193                                                SUPPORTED_Autoneg |
6194                                                SUPPORTED_FIBRE |
6195                                                SUPPORTED_Pause |
6196                                                SUPPORTED_Asym_Pause);
6197                         break;
6198
6199                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6200                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6201                                        ext_phy_type);
6202
6203                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6204                                                SUPPORTED_1000baseT_Full |
6205                                                SUPPORTED_Autoneg |
6206                                                SUPPORTED_FIBRE |
6207                                                SUPPORTED_Pause |
6208                                                SUPPORTED_Asym_Pause);
6209                         break;
6210
6211                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6212                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6213                                        ext_phy_type);
6214
6215                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6216                                                SUPPORTED_TP |
6217                                                SUPPORTED_Autoneg |
6218                                                SUPPORTED_Pause |
6219                                                SUPPORTED_Asym_Pause);
6220                         break;
6221
6222                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6223                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6224                                        ext_phy_type);
6225
6226                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6227                                                SUPPORTED_10baseT_Full |
6228                                                SUPPORTED_100baseT_Half |
6229                                                SUPPORTED_100baseT_Full |
6230                                                SUPPORTED_1000baseT_Full |
6231                                                SUPPORTED_10000baseT_Full |
6232                                                SUPPORTED_TP |
6233                                                SUPPORTED_Autoneg |
6234                                                SUPPORTED_Pause |
6235                                                SUPPORTED_Asym_Pause);
6236                         break;
6237
6238                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6239                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6240                                   bp->link_params.ext_phy_config);
6241                         break;
6242
6243                 default:
6244                         BNX2X_ERR("NVRAM config error. "
6245                                   "BAD XGXS ext_phy_config 0x%x\n",
6246                                   bp->link_params.ext_phy_config);
6247                         return;
6248                 }
6249
6250                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6251                                            port*0x18);
6252                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6253
6254                 break;
6255
6256         default:
6257                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6258                           bp->port.link_config);
6259                 return;
6260         }
6261         bp->link_params.phy_addr = bp->port.phy_addr;
6262
6263         /* mask what we support according to speed_cap_mask */
6264         if (!(bp->link_params.speed_cap_mask &
6265                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6266                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6267
6268         if (!(bp->link_params.speed_cap_mask &
6269                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6270                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6271
6272         if (!(bp->link_params.speed_cap_mask &
6273                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6274                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6275
6276         if (!(bp->link_params.speed_cap_mask &
6277                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6278                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6279
6280         if (!(bp->link_params.speed_cap_mask &
6281                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6282                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6283                                         SUPPORTED_1000baseT_Full);
6284
6285         if (!(bp->link_params.speed_cap_mask &
6286                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6287                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6288
6289         if (!(bp->link_params.speed_cap_mask &
6290                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6291                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6292
6293         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
6294 }
6295
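/* Translate the NVRAM link_config into the requested line speed,
 * duplex, advertised modes and flow control, validating each setting
 * against the supported mask derived above.
 */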
6296 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6297 {
6298         bp->link_params.req_duplex = DUPLEX_FULL;
6299
6300         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6301         case PORT_FEATURE_LINK_SPEED_AUTO:
6302                 if (bp->port.supported & SUPPORTED_Autoneg) {
6303                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6304                         bp->port.advertising = bp->port.supported;
6305                 } else {
6306                         u32 ext_phy_type =
6307                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6308
6309                         if ((ext_phy_type ==
6310                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6311                             (ext_phy_type ==
6312                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6313                                 /* force 10G, no AN */
6314                                 bp->link_params.req_line_speed = SPEED_10000;
6315                                 bp->port.advertising =
6316                                                 (ADVERTISED_10000baseT_Full |
6317                                                  ADVERTISED_FIBRE);
6318                                 break;
6319                         }
6320                         BNX2X_ERR("NVRAM config error. "
6321                                   "Invalid link_config 0x%x"
6322                                   "  Autoneg not supported\n",
6323                                   bp->port.link_config);
6324                         return;
6325                 }
6326                 break;
6327
6328         case PORT_FEATURE_LINK_SPEED_10M_FULL:
6329                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6330                         bp->link_params.req_line_speed = SPEED_10;
6331                         bp->port.advertising = (ADVERTISED_10baseT_Full |
6332                                                 ADVERTISED_TP);
6333                 } else {
6334                         BNX2X_ERROR("NVRAM config error. "
6335                                     "Invalid link_config 0x%x"
6336                                     "  speed_cap_mask 0x%x\n",
6337                                     bp->port.link_config,
6338                                     bp->link_params.speed_cap_mask);
6339                         return;
6340                 }
6341                 break;
6342
6343         case PORT_FEATURE_LINK_SPEED_10M_HALF:
6344                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6345                         bp->link_params.req_line_speed = SPEED_10;
6346                         bp->link_params.req_duplex = DUPLEX_HALF;
6347                         bp->port.advertising = (ADVERTISED_10baseT_Half |
6348                                                 ADVERTISED_TP);
6349                 } else {
6350                         BNX2X_ERROR("NVRAM config error. "
6351                                     "Invalid link_config 0x%x"
6352                                     "  speed_cap_mask 0x%x\n",
6353                                     bp->port.link_config,
6354                                     bp->link_params.speed_cap_mask);
6355                         return;
6356                 }
6357                 break;
6358
6359         case PORT_FEATURE_LINK_SPEED_100M_FULL:
6360                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6361                         bp->link_params.req_line_speed = SPEED_100;
6362                         bp->port.advertising = (ADVERTISED_100baseT_Full |
6363                                                 ADVERTISED_TP);
6364                 } else {
6365                         BNX2X_ERROR("NVRAM config error. "
6366                                     "Invalid link_config 0x%x"
6367                                     "  speed_cap_mask 0x%x\n",
6368                                     bp->port.link_config,
6369                                     bp->link_params.speed_cap_mask);
6370                         return;
6371                 }
6372                 break;
6373
6374         case PORT_FEATURE_LINK_SPEED_100M_HALF:
6375                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6376                         bp->link_params.req_line_speed = SPEED_100;
6377                         bp->link_params.req_duplex = DUPLEX_HALF;
6378                         bp->port.advertising = (ADVERTISED_100baseT_Half |
6379                                                 ADVERTISED_TP);
6380                 } else {
6381                         BNX2X_ERROR("NVRAM config error. "
6382                                     "Invalid link_config 0x%x"
6383                                     "  speed_cap_mask 0x%x\n",
6384                                     bp->port.link_config,
6385                                     bp->link_params.speed_cap_mask);
6386                         return;
6387                 }
6388                 break;
6389
6390         case PORT_FEATURE_LINK_SPEED_1G:
6391                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6392                         bp->link_params.req_line_speed = SPEED_1000;
6393                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
6394                                                 ADVERTISED_TP);
6395                 } else {
6396                         BNX2X_ERROR("NVRAM config error. "
6397                                     "Invalid link_config 0x%x"
6398                                     "  speed_cap_mask 0x%x\n",
6399                                     bp->port.link_config,
6400                                     bp->link_params.speed_cap_mask);
6401                         return;
6402                 }
6403                 break;
6404
6405         case PORT_FEATURE_LINK_SPEED_2_5G:
6406                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6407                         bp->link_params.req_line_speed = SPEED_2500;
6408                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
6409                                                 ADVERTISED_TP);
6410                 } else {
6411                         BNX2X_ERROR("NVRAM config error. "
6412                                     "Invalid link_config 0x%x"
6413                                     "  speed_cap_mask 0x%x\n",
6414                                     bp->port.link_config,
6415                                     bp->link_params.speed_cap_mask);
6416                         return;
6417                 }
6418                 break;
6419
6420         case PORT_FEATURE_LINK_SPEED_10G_CX4:
6421         case PORT_FEATURE_LINK_SPEED_10G_KX4:
6422         case PORT_FEATURE_LINK_SPEED_10G_KR:
6423                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6424                         bp->link_params.req_line_speed = SPEED_10000;
6425                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
6426                                                 ADVERTISED_FIBRE);
6427                 } else {
6428                         BNX2X_ERROR("NVRAM config error. "
6429                                     "Invalid link_config 0x%x"
6430                                     "  speed_cap_mask 0x%x\n",
6431                                     bp->port.link_config,
6432                                     bp->link_params.speed_cap_mask);
6433                         return;
6434                 }
6435                 break;
6436
6437         default:
6438                 BNX2X_ERROR("NVRAM config error. "
6439                             "BAD link speed link_config 0x%x\n",
6440                             bp->port.link_config);
6441                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6442                 bp->port.advertising = bp->port.supported;
6443                 break;
6444         }
6445
6446         bp->link_params.req_flow_ctrl = (bp->port.link_config &
6447                                          PORT_FEATURE_FLOW_CONTROL_MASK);
6448         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6449             !(bp->port.supported & SUPPORTED_Autoneg))
6450                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6451
6452         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
6453                        "  advertising 0x%x\n",
6454                        bp->link_params.req_line_speed,
6455                        bp->link_params.req_duplex,
6456                        bp->link_params.req_flow_ctrl, bp->port.advertising);
6457 }
6458
6459 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6460 {
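        /* Assemble the 6-byte MAC address in network (big endian) byte
         * order: 2 bytes from mac_hi followed by 4 bytes from mac_lo.
         */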
6461         mac_hi = cpu_to_be16(mac_hi);
6462         mac_lo = cpu_to_be32(mac_lo);
6463         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6464         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6465 }
6466
6467 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6468 {
6469         int port = BP_PORT(bp);
6470         u32 val, val2;
6471         u32 config;
6472         u16 i;
6473         u32 ext_phy_type;
6474
6475         bp->link_params.bp = bp;
6476         bp->link_params.port = port;
6477
6478         bp->link_params.lane_config =
6479                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6480         bp->link_params.ext_phy_config =
6481                 SHMEM_RD(bp,
6482                          dev_info.port_hw_config[port].external_phy_config);
6483         /* BCM8727_NOC => BCM8727 no over current */
6484         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6485             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6486                 bp->link_params.ext_phy_config &=
6487                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6488                 bp->link_params.ext_phy_config |=
6489                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6490                 bp->link_params.feature_config_flags |=
6491                         FEATURE_CONFIG_BCM8727_NOC;
6492         }
6493
6494         bp->link_params.speed_cap_mask =
6495                 SHMEM_RD(bp,
6496                          dev_info.port_hw_config[port].speed_capability_mask);
6497
6498         bp->port.link_config =
6499                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6500
6501         /* Get the 4 lanes xgxs config rx and tx */
6502         for (i = 0; i < 2; i++) {
6503                 val = SHMEM_RD(bp,
6504                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6505                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6506                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6507
6508                 val = SHMEM_RD(bp,
6509                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6510                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6511                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6512         }
6513
6514         /* If the device is capable of WoL, set the default state according
6515          * to the HW
6516          */
6517         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6518         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6519                    (config & PORT_FEATURE_WOL_ENABLED));
6520
6521         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
6522                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
6523                        bp->link_params.lane_config,
6524                        bp->link_params.ext_phy_config,
6525                        bp->link_params.speed_cap_mask, bp->port.link_config);
6526
6527         bp->link_params.switch_cfg |= (bp->port.link_config &
6528                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
6529         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6530
6531         bnx2x_link_settings_requested(bp);
6532
6533         /*
6534          * If connected directly, work with the internal PHY, otherwise, work
6535          * with the external PHY
6536          */
6537         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6538         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6539                 bp->mdio.prtad = bp->link_params.phy_addr;
6540
6541         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6542                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6543                 bp->mdio.prtad =
6544                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
6545
6546         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6547         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6548         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6549         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6550         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6551
6552 #ifdef BCM_CNIC
6553         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6554         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6555         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6556 #endif
6557 }
6558
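/* Gather chip-, port- and function-level configuration from the HW
 * and shared memory; on E1H also detect multi-function (MF) mode and
 * this function's outer-VLAN (E1HOV) tag.
 */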
6559 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6560 {
6561         int func = BP_FUNC(bp);
6562         u32 val, val2;
6563         int rc = 0;
6564
6565         bnx2x_get_common_hwinfo(bp);
6566
6567         bp->e1hov = 0;
6568         bp->e1hmf = 0;
6569         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6570                 bp->mf_config =
6571                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6572
6573                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6574                        FUNC_MF_CFG_E1HOV_TAG_MASK);
6575                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6576                         bp->e1hmf = 1;
6577                 BNX2X_DEV_INFO("%s function mode\n",
6578                                IS_E1HMF(bp) ? "multi" : "single");
6579
6580                 if (IS_E1HMF(bp)) {
6581                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6582                                                                 e1hov_tag) &
6583                                FUNC_MF_CFG_E1HOV_TAG_MASK);
6584                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6585                                 bp->e1hov = val;
6586                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6587                                                "(0x%04x)\n",
6588                                                func, bp->e1hov, bp->e1hov);
6589                         } else {
6590                                 BNX2X_ERROR("No valid E1HOV for func %d,"
6591                                             "  aborting\n", func);
6592                                 rc = -EPERM;
6593                         }
6594                 } else {
6595                         if (BP_E1HVN(bp)) {
6596                                 BNX2X_ERROR("VN %d in single function mode,"
6597                                             "  aborting\n", BP_E1HVN(bp));
6598                                 rc = -EPERM;
6599                         }
6600                 }
6601         }
6602
6603         if (!BP_NOMCP(bp)) {
6604                 bnx2x_get_port_hwinfo(bp);
6605
6606                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6607                               DRV_MSG_SEQ_NUMBER_MASK);
6608                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6609         }
6610
6611         if (IS_E1HMF(bp)) {
6612                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6613                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
6614                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6615                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6616                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6617                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6618                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6619                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6620                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
6621                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
6622                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6623                                ETH_ALEN);
6624                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6625                                ETH_ALEN);
6626                 }
6627
6628                 return rc;
6629         }
6630
6631         if (BP_NOMCP(bp)) {
6632                 /* only supposed to happen on emulation/FPGA */
6633                 BNX2X_ERROR("warning: random MAC workaround active\n");
6634                 random_ether_addr(bp->dev->dev_addr);
6635                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6636         }
6637
6638         return rc;
6639 }
6640
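/* Read the PCI VPD and, if the manufacturer ID matches Dell, extract
 * the vendor-specific (V0) firmware version string into bp->fw_ver.
 */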
6641 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6642 {
6643         int cnt, i, block_end, rodi;
6644         char vpd_data[BNX2X_VPD_LEN+1];
6645         char str_id_reg[VENDOR_ID_LEN+1];
6646         char str_id_cap[VENDOR_ID_LEN+1];
6647         u8 len;
6648
6649         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6650         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6651
6652         if (cnt < BNX2X_VPD_LEN)
6653                 goto out_not_found;
6654
6655         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6656                              PCI_VPD_LRDT_RO_DATA);
6657         if (i < 0)
6658                 goto out_not_found;
6659
6660
6661         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6662                     pci_vpd_lrdt_size(&vpd_data[i]);
6663
6664         i += PCI_VPD_LRDT_TAG_SIZE;
6665
6666         if (block_end > BNX2X_VPD_LEN)
6667                 goto out_not_found;
6668
6669         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6670                                    PCI_VPD_RO_KEYWORD_MFR_ID);
6671         if (rodi < 0)
6672                 goto out_not_found;
6673
6674         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6675
6676         if (len != VENDOR_ID_LEN)
6677                 goto out_not_found;
6678
6679         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6680
6681         /* vendor specific info */
6682         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6683         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6684         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6685             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6686
6687                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6688                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
6689                 if (rodi >= 0) {
6690                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6691
6692                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6693
6694                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6695                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6696                                 bp->fw_ver[len] = ' ';
6697                         }
6698                 }
6699                 return;
6700         }
6701 out_not_found:
6702         return;
6703 }
6704
6705 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6706 {
6707         int func = BP_FUNC(bp);
6708         int timer_interval;
6709         int rc;
6710
6711         /* Disable interrupt handling until HW is initialized */
6712         atomic_set(&bp->intr_sem, 1);
6713         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6714
6715         mutex_init(&bp->port.phy_mutex);
6716         mutex_init(&bp->fw_mb_mutex);
6717         spin_lock_init(&bp->stats_lock);
6718 #ifdef BCM_CNIC
6719         mutex_init(&bp->cnic_mutex);
6720 #endif
6721
6722         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6723         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6724
6725         rc = bnx2x_get_hwinfo(bp);
6726
6727         bnx2x_read_fwinfo(bp);
6728         /* need to reset chip if undi was active */
6729         if (!BP_NOMCP(bp))
6730                 bnx2x_undi_unload(bp);
6731
6732         if (CHIP_REV_IS_FPGA(bp))
6733                 dev_err(&bp->pdev->dev, "FPGA detected\n");
6734
6735         if (BP_NOMCP(bp) && (func == 0))
6736                 dev_err(&bp->pdev->dev, "MCP disabled, "
6737                                         "must load devices in order!\n");
6738
6739         /* Set multi queue mode */
6740         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6741             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6742                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6743                                         "requested is not MSI-X\n");
6744                 multi_mode = ETH_RSS_MODE_DISABLED;
6745         }
6746         bp->multi_mode = multi_mode;
6747         bp->int_mode = int_mode;
6748
6749         bp->dev->features |= NETIF_F_GRO;
6750
6751         /* Set TPA flags */
6752         if (disable_tpa) {
6753                 bp->flags &= ~TPA_ENABLE_FLAG;
6754                 bp->dev->features &= ~NETIF_F_LRO;
6755         } else {
6756                 bp->flags |= TPA_ENABLE_FLAG;
6757                 bp->dev->features |= NETIF_F_LRO;
6758         }
6759         bp->disable_tpa = disable_tpa;
6760
6761         if (CHIP_IS_E1(bp))
6762                 bp->dropless_fc = 0;
6763         else
6764                 bp->dropless_fc = dropless_fc;
6765
6766         bp->mrrs = mrrs;
6767
6768         bp->tx_ring_size = MAX_TX_AVAIL;
6769         bp->rx_ring_size = MAX_RX_AVAIL;
6770
6771         bp->rx_csum = 1;
6772
6773         /* make sure that the numbers are in the right granularity */
6774         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6775         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6776
6777         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6778         bp->current_interval = (poll ? poll : timer_interval);
6779
6780         init_timer(&bp->timer);
6781         bp->timer.expires = jiffies + bp->current_interval;
6782         bp->timer.data = (unsigned long) bp;
6783         bp->timer.function = bnx2x_timer;
6784
6785         return rc;
6786 }
6787
6788
6789 /****************************************************************************
6790 * General service functions
6791 ****************************************************************************/
6792
6793 /* called with rtnl_lock */
6794 static int bnx2x_open(struct net_device *dev)
6795 {
6796         struct bnx2x *bp = netdev_priv(dev);
6797
6798         netif_carrier_off(dev);
6799
6800         bnx2x_set_power_state(bp, PCI_D0);
6801
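        /* Recovery gate: if a previous "process kill" recovery hasn't
         * completed, try to finish it here.  The do { } while (0) below is a
         * single-pass scope so that "break" can skip the failure path once
         * the leader reset succeeds.
         */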
6802         if (!bnx2x_reset_is_done(bp)) {
6803                 do {
6804                         /* Reset the MCP mailbox sequence if there is an
6805                          * ongoing recovery
6806                          */
6807                         bp->fw_seq = 0;
6808
6809                         /* If this is the first function to load and
6810                          * "reset done" is still not cleared, finish the
6811                          * recovery here. We don't check the attention
6812                          * state since it may already have been cleared by
6813                          * a "common" reset, but proceed with "process
6814                          * kill" anyway.
6815                          */
6815                         if ((bnx2x_get_load_cnt(bp) == 0) &&
6816                                 bnx2x_trylock_hw_lock(bp,
6817                                 HW_LOCK_RESOURCE_RESERVED_08) &&
6818                                 (!bnx2x_leader_reset(bp))) {
6819                                 DP(NETIF_MSG_HW, "Recovered in open\n");
6820                                 break;
6821                         }
6822
6823                         bnx2x_set_power_state(bp, PCI_D3hot);
6824
6825                         printk(KERN_ERR "%s: Recovery flow hasn't been"
6826                         " properly completed yet. Try again later. If you"
6827                         " still see this message after a few retries then"
6828                         " a power cycle is required.\n", bp->dev->name);
6829
6830                         return -EAGAIN;
6831                 } while (0);
6832         }
6833
6834         bp->recovery_state = BNX2X_RECOVERY_DONE;
6835
6836         return bnx2x_nic_load(bp, LOAD_OPEN);
6837 }
6838
6839 /* called with rtnl_lock */
6840 static int bnx2x_close(struct net_device *dev)
6841 {
6842         struct bnx2x *bp = netdev_priv(dev);
6843
6844         /* Unload the driver, release IRQs */
6845         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6846         bnx2x_set_power_state(bp, PCI_D3hot);
6847
6848         return 0;
6849 }
6850
6851 /* called with netif_tx_lock from dev_mcast.c */
6852 void bnx2x_set_rx_mode(struct net_device *dev)
6853 {
6854         struct bnx2x *bp = netdev_priv(dev);
6855         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6856         int port = BP_PORT(bp);
6857
6858         if (bp->state != BNX2X_STATE_OPEN) {
6859                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6860                 return;
6861         }
6862
6863         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6864
6865         if (dev->flags & IFF_PROMISC)
6866                 rx_mode = BNX2X_RX_MODE_PROMISC;
6867
6868         else if ((dev->flags & IFF_ALLMULTI) ||
6869                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6870                   CHIP_IS_E1(bp)))
6871                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6872
6873         else { /* some multicasts */
6874                 if (CHIP_IS_E1(bp)) {
6875                         int i, old, offset;
6876                         struct netdev_hw_addr *ha;
6877                         struct mac_configuration_cmd *config =
6878                                                 bnx2x_sp(bp, mcast_config);
6879
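                        /* On E1 each multicast MAC gets its own CAM entry
                         * (hence the BNX2X_MAX_MULTICAST cap checked above).
                         * The table is filled here, stale entries from a
                         * previously longer list are invalidated, and the
                         * result is handed to FW via a SET_MAC ramrod.
                         */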
6880                         i = 0;
6881                         netdev_for_each_mc_addr(ha, dev) {
6882                                 config->config_table[i].
6883                                         cam_entry.msb_mac_addr =
6884                                         swab16(*(u16 *)&ha->addr[0]);
6885                                 config->config_table[i].
6886                                         cam_entry.middle_mac_addr =
6887                                         swab16(*(u16 *)&ha->addr[2]);
6888                                 config->config_table[i].
6889                                         cam_entry.lsb_mac_addr =
6890                                         swab16(*(u16 *)&ha->addr[4]);
6891                                 config->config_table[i].cam_entry.flags =
6892                                                         cpu_to_le16(port);
6893                                 config->config_table[i].
6894                                         target_table_entry.flags = 0;
6895                                 config->config_table[i].target_table_entry.
6896                                         clients_bit_vector =
6897                                                 cpu_to_le32(1 << BP_L_ID(bp));
6898                                 config->config_table[i].
6899                                         target_table_entry.vlan_id = 0;
6900
6901                                 DP(NETIF_MSG_IFUP,
6902                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6903                                    config->config_table[i].
6904                                                 cam_entry.msb_mac_addr,
6905                                    config->config_table[i].
6906                                                 cam_entry.middle_mac_addr,
6907                                    config->config_table[i].
6908                                                 cam_entry.lsb_mac_addr);
6909                                 i++;
6910                         }
6911                         old = config->hdr.length;
6912                         if (old > i) {
6913                                 for (; i < old; i++) {
6914                                         if (CAM_IS_INVALID(config->
6915                                                            config_table[i])) {
6916                                                 /* already invalidated */
6917                                                 break;
6918                                         }
6919                                         /* invalidate */
6920                                         CAM_INVALIDATE(config->
6921                                                        config_table[i]);
6922                                 }
6923                         }
6924
6925                         if (CHIP_REV_IS_SLOW(bp))
6926                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6927                         else
6928                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
6929
6930                         config->hdr.length = i;
6931                         config->hdr.offset = offset;
6932                         config->hdr.client_id = bp->fp->cl_id;
6933                         config->hdr.reserved1 = 0;
6934
6935                         bp->set_mac_pending++;
6936                         smp_wmb();
6937
6938                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6939                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6940                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6941                                       0);
6942                 } else { /* E1H */
6943                         /* Accept one or more multicasts */
6944                         struct netdev_hw_addr *ha;
6945                         u32 mc_filter[MC_HASH_SIZE];
6946                         u32 crc, bit, regidx;
6947                         int i;
6948
6949                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6950
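                        /* E1H uses a 256-bit multicast hash filter instead of
                         * CAM entries: the top 8 bits of the CRC32c of each
                         * MAC select one bit, regidx = bit >> 5 picks one of
                         * the MC_HASH_SIZE 32-bit registers and bit & 0x1f
                         * the bit within it.
                         */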
6951                         netdev_for_each_mc_addr(ha, dev) {
6952                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6953                                    ha->addr);
6954
6955                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6956                                 bit = (crc >> 24) & 0xff;
6957                                 regidx = bit >> 5;
6958                                 bit &= 0x1f;
6959                                 mc_filter[regidx] |= (1 << bit);
6960                         }
6961
6962                         for (i = 0; i < MC_HASH_SIZE; i++)
6963                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6964                                        mc_filter[i]);
6965                 }
6966         }
6967
6968         bp->rx_mode = rx_mode;
6969         bnx2x_set_storm_rx_mode(bp);
6970 }
6971
6972
6973 /* called with rtnl_lock */
6974 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6975                            int devad, u16 addr)
6976 {
6977         struct bnx2x *bp = netdev_priv(netdev);
6978         u16 value;
6979         int rc;
6980         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6981
6982         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6983            prtad, devad, addr);
6984
6985         if (prtad != bp->mdio.prtad) {
6986                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6987                    prtad, bp->mdio.prtad);
6988                 return -EINVAL;
6989         }
6990
6991         /* The HW expects different devad if CL22 is used */
6992         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6993
6994         bnx2x_acquire_phy_lock(bp);
6995         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
6996                              devad, addr, &value);
6997         bnx2x_release_phy_lock(bp);
6998         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6999
7000         if (!rc)
7001                 rc = value;
7002         return rc;
7003 }
7004
7005 /* called with rtnl_lock */
7006 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7007                             u16 addr, u16 value)
7008 {
7009         struct bnx2x *bp = netdev_priv(netdev);
7010         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7011         int rc;
7012
7013         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7014                            " value 0x%x\n", prtad, devad, addr, value);
7015
7016         if (prtad != bp->mdio.prtad) {
7017                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7018                    prtad, bp->mdio.prtad);
7019                 return -EINVAL;
7020         }
7021
7022         /* The HW expects different devad if CL22 is used */
7023         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7024
7025         bnx2x_acquire_phy_lock(bp);
7026         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7027                               devad, addr, value);
7028         bnx2x_release_phy_lock(bp);
7029         return rc;
7030 }
7031
7032 /* called with rtnl_lock */
7033 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7034 {
7035         struct bnx2x *bp = netdev_priv(dev);
7036         struct mii_ioctl_data *mdio = if_mii(ifr);
7037
7038         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7039            mdio->phy_id, mdio->reg_num, mdio->val_in);
7040
7041         if (!netif_running(dev))
7042                 return -EAGAIN;
7043
7044         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7045 }
7046
7047 #ifdef CONFIG_NET_POLL_CONTROLLER
7048 static void poll_bnx2x(struct net_device *dev)
7049 {
7050         struct bnx2x *bp = netdev_priv(dev);
7051
7052         disable_irq(bp->pdev->irq);
7053         bnx2x_interrupt(bp->pdev->irq, dev);
7054         enable_irq(bp->pdev->irq);
7055 }
7056 #endif
7057
7058 static const struct net_device_ops bnx2x_netdev_ops = {
7059         .ndo_open               = bnx2x_open,
7060         .ndo_stop               = bnx2x_close,
7061         .ndo_start_xmit         = bnx2x_start_xmit,
7062         .ndo_set_multicast_list = bnx2x_set_rx_mode,
7063         .ndo_set_mac_address    = bnx2x_change_mac_addr,
7064         .ndo_validate_addr      = eth_validate_addr,
7065         .ndo_do_ioctl           = bnx2x_ioctl,
7066         .ndo_change_mtu         = bnx2x_change_mtu,
7067         .ndo_tx_timeout         = bnx2x_tx_timeout,
7068 #ifdef BCM_VLAN
7069         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
7070 #endif
7071 #ifdef CONFIG_NET_POLL_CONTROLLER
7072         .ndo_poll_controller    = poll_bnx2x,
7073 #endif
7074 };
7075
7076 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7077                                     struct net_device *dev)
7078 {
7079         struct bnx2x *bp;
7080         int rc;
7081
7082         SET_NETDEV_DEV(dev, &pdev->dev);
7083         bp = netdev_priv(dev);
7084
7085         bp->dev = dev;
7086         bp->pdev = pdev;
7087         bp->flags = 0;
7088         bp->func = PCI_FUNC(pdev->devfn);
7089
7090         rc = pci_enable_device(pdev);
7091         if (rc) {
7092                 dev_err(&bp->pdev->dev,
7093                         "Cannot enable PCI device, aborting\n");
7094                 goto err_out;
7095         }
7096
7097         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7098                 dev_err(&bp->pdev->dev,
7099                         "Cannot find PCI device base address, aborting\n");
7100                 rc = -ENODEV;
7101                 goto err_out_disable;
7102         }
7103
7104         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7105                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7106                        " base address, aborting\n");
7107                 rc = -ENODEV;
7108                 goto err_out_disable;
7109         }
7110
7111         if (atomic_read(&pdev->enable_cnt) == 1) {
7112                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7113                 if (rc) {
7114                         dev_err(&bp->pdev->dev,
7115                                 "Cannot obtain PCI resources, aborting\n");
7116                         goto err_out_disable;
7117                 }
7118
7119                 pci_set_master(pdev);
7120                 pci_save_state(pdev);
7121         }
7122
7123         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7124         if (bp->pm_cap == 0) {
7125                 dev_err(&bp->pdev->dev,
7126                         "Cannot find power management capability, aborting\n");
7127                 rc = -EIO;
7128                 goto err_out_release;
7129         }
7130
7131         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7132         if (bp->pcie_cap == 0) {
7133                 dev_err(&bp->pdev->dev,
7134                         "Cannot find PCI Express capability, aborting\n");
7135                 rc = -EIO;
7136                 goto err_out_release;
7137         }
7138
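        /* Prefer 64-bit DMA (DAC): if the 64-bit streaming mask is accepted,
         * the coherent mask must also be 64-bit capable; otherwise fall back
         * to a 32-bit mask and fail the probe if even that is refused.
         */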
7139         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7140                 bp->flags |= USING_DAC_FLAG;
7141                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7142                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7143                                " failed, aborting\n");
7144                         rc = -EIO;
7145                         goto err_out_release;
7146                 }
7147
7148         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7149                 dev_err(&bp->pdev->dev,
7150                         "System does not support DMA, aborting\n");
7151                 rc = -EIO;
7152                 goto err_out_release;
7153         }
7154
7155         dev->mem_start = pci_resource_start(pdev, 0);
7156         dev->base_addr = dev->mem_start;
7157         dev->mem_end = pci_resource_end(pdev, 0);
7158
7159         dev->irq = pdev->irq;
7160
7161         bp->regview = pci_ioremap_bar(pdev, 0);
7162         if (!bp->regview) {
7163                 dev_err(&bp->pdev->dev,
7164                         "Cannot map register space, aborting\n");
7165                 rc = -ENOMEM;
7166                 goto err_out_release;
7167         }
7168
7169         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7170                                         min_t(u64, BNX2X_DB_SIZE,
7171                                               pci_resource_len(pdev, 2)));
7172         if (!bp->doorbells) {
7173                 dev_err(&bp->pdev->dev,
7174                         "Cannot map doorbell space, aborting\n");
7175                 rc = -ENOMEM;
7176                 goto err_out_unmap;
7177         }
7178
7179         bnx2x_set_power_state(bp, PCI_D0);
7180
7181         /* clean indirect addresses */
7182         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7183                                PCICFG_VENDOR_ID_OFFSET);
7184         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7185         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7186         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7187         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7188
7189         /* Reset the load counter */
7190         bnx2x_clear_load_cnt(bp);
7191
7192         dev->watchdog_timeo = TX_TIMEOUT;
7193
7194         dev->netdev_ops = &bnx2x_netdev_ops;
7195         bnx2x_set_ethtool_ops(dev);
7196         dev->features |= NETIF_F_SG;
7197         dev->features |= NETIF_F_HW_CSUM;
7198         if (bp->flags & USING_DAC_FLAG)
7199                 dev->features |= NETIF_F_HIGHDMA;
7200         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7201         dev->features |= NETIF_F_TSO6;
7202 #ifdef BCM_VLAN
7203         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7204         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7205
7206         dev->vlan_features |= NETIF_F_SG;
7207         dev->vlan_features |= NETIF_F_HW_CSUM;
7208         if (bp->flags & USING_DAC_FLAG)
7209                 dev->vlan_features |= NETIF_F_HIGHDMA;
7210         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7211         dev->vlan_features |= NETIF_F_TSO6;
7212 #endif
7213
7214         /* get_port_hwinfo() will set prtad and mmds properly */
7215         bp->mdio.prtad = MDIO_PRTAD_NONE;
7216         bp->mdio.mmds = 0;
7217         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7218         bp->mdio.dev = dev;
7219         bp->mdio.mdio_read = bnx2x_mdio_read;
7220         bp->mdio.mdio_write = bnx2x_mdio_write;
7221
7222         return 0;
7223
7224 err_out_unmap:
7225         if (bp->regview) {
7226                 iounmap(bp->regview);
7227                 bp->regview = NULL;
7228         }
7229         if (bp->doorbells) {
7230                 iounmap(bp->doorbells);
7231                 bp->doorbells = NULL;
7232         }
7233
7234 err_out_release:
7235         if (atomic_read(&pdev->enable_cnt) == 1)
7236                 pci_release_regions(pdev);
7237
7238 err_out_disable:
7239         pci_disable_device(pdev);
7240         pci_set_drvdata(pdev, NULL);
7241
7242 err_out:
7243         return rc;
7244 }
7245
7246 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7247                                                  int *width, int *speed)
7248 {
7249         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7250
7251         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7252
7253         /* returned speed encoding: 1 = 2.5GHz, 2 = 5GHz */
7254         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7255 }
7256
7257 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
7258 {
7259         const struct firmware *firmware = bp->firmware;
7260         struct bnx2x_fw_file_hdr *fw_hdr;
7261         struct bnx2x_fw_file_section *sections;
7262         u32 offset, len, num_ops;
7263         u16 *ops_offsets;
7264         int i;
7265         const u8 *fw_ver;
7266
7267         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7268                 return -EINVAL;
7269
7270         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7271         sections = (struct bnx2x_fw_file_section *)fw_hdr;
7272
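        /* The header itself is a packed array of {offset, len} section
         * descriptors (all fields big endian), which is why fw_hdr can be
         * walked generically as "sections" below.
         */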
7273         /* Make sure none of the offsets and sizes make us read beyond
7274          * the end of the firmware data */
7275         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7276                 offset = be32_to_cpu(sections[i].offset);
7277                 len = be32_to_cpu(sections[i].len);
7278                 if (offset + len > firmware->size) {
7279                         dev_err(&bp->pdev->dev,
7280                                 "Section %d length is out of bounds\n", i);
7281                         return -EINVAL;
7282                 }
7283         }
7284
7285         /* Likewise for the init_ops offsets */
7286         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7287         ops_offsets = (u16 *)(firmware->data + offset);
7288         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7289
7290         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7291                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7292                         dev_err(&bp->pdev->dev,
7293                                 "Section offset %d is out of bounds\n", i);
7294                         return -EINVAL;
7295                 }
7296         }
7297
7298         /* Check FW version */
7299         offset = be32_to_cpu(fw_hdr->fw_version.offset);
7300         fw_ver = firmware->data + offset;
7301         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7302             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7303             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7304             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7305                 dev_err(&bp->pdev->dev,
7306                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7307                        fw_ver[0], fw_ver[1], fw_ver[2],
7308                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7309                        BCM_5710_FW_MINOR_VERSION,
7310                        BCM_5710_FW_REVISION_VERSION,
7311                        BCM_5710_FW_ENGINEERING_VERSION);
7312                 return -EINVAL;
7313         }
7314
7315         return 0;
7316 }
7317
7318 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7319 {
7320         const __be32 *source = (const __be32 *)_source;
7321         u32 *target = (u32 *)_target;
7322         u32 i;
7323
7324         for (i = 0; i < n/4; i++)
7325                 target[i] = be32_to_cpu(source[i]);
7326 }
7327
7328 /*
7329  * Ops array is stored in the following format:
7330  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7331  */
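/* Worked example (bytes are illustrative only): the 8-byte big-endian
 * record 10 00 01 23 de ad be ef unpacks to op = 0x10, offset = 0x000123,
 * raw_data = 0xdeadbeef.
 */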
7332 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7333 {
7334         const __be32 *source = (const __be32 *)_source;
7335         struct raw_op *target = (struct raw_op *)_target;
7336         u32 i, j, tmp;
7337
7338         for (i = 0, j = 0; i < n/8; i++, j += 2) {
7339                 tmp = be32_to_cpu(source[j]);
7340                 target[i].op = (tmp >> 24) & 0xff;
7341                 target[i].offset = tmp & 0xffffff;
7342                 target[i].raw_data = be32_to_cpu(source[j + 1]);
7343         }
7344 }
7345
7346 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7347 {
7348         const __be16 *source = (const __be16 *)_source;
7349         u16 *target = (u16 *)_target;
7350         u32 i;
7351
7352         for (i = 0; i < n/2; i++)
7353                 target[i] = be16_to_cpu(source[i]);
7354 }
7355
7356 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
7357 do {                                                                    \
7358         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
7359         bp->arr = kmalloc(len, GFP_KERNEL);                             \
7360         if (!bp->arr) {                                                 \
7361                 pr_err("Failed to allocate %u bytes for "#arr"\n", len); \
7362                 goto lbl;                                               \
7363         }                                                               \
7364         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
7365              (u8 *)bp->arr, len);                                       \
7366 } while (0)
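/* BNX2X_ALLOC_AND_SET(arr, lbl, func) allocates bp->arr with the length
 * recorded in the fw file header and converts the big-endian section into
 * host order via func, jumping to the unwind label lbl on allocation
 * failure.  For example, BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) expands (roughly) to:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */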
7367
7368 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7369 {
7370         const char *fw_file_name;
7371         struct bnx2x_fw_file_hdr *fw_hdr;
7372         int rc;
7373
7374         if (CHIP_IS_E1(bp))
7375                 fw_file_name = FW_FILE_NAME_E1;
7376         else if (CHIP_IS_E1H(bp))
7377                 fw_file_name = FW_FILE_NAME_E1H;
7378         else {
7379                 dev_err(dev, "Unsupported chip revision\n");
7380                 return -EINVAL;
7381         }
7382
7383         dev_info(dev, "Loading %s\n", fw_file_name);
7384
7385         rc = request_firmware(&bp->firmware, fw_file_name, dev);
7386         if (rc) {
7387                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
7388                 goto request_firmware_exit;
7389         }
7390
7391         rc = bnx2x_check_firmware(bp);
7392         if (rc) {
7393                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
7394                 goto request_firmware_exit;
7395         }
7396
7397         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7398
7399         /* Initialize the pointers to the init arrays */
7400         /* Blob */
7401         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7402
7403         /* Opcodes */
7404         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7405
7406         /* Offsets */
7407         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7408                             be16_to_cpu_n);
7409
7410         /* STORMs firmware */
7411         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7412                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7413         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
7414                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7415         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7416                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7417         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
7418                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
7419         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7420                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7421         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
7422                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7423         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7424                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7425         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
7426                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
7427
7428         return 0;
7429
7430 init_offsets_alloc_err:
7431         kfree(bp->init_ops);
7432 init_ops_alloc_err:
7433         kfree(bp->init_data);
7434 request_firmware_exit:
7435         release_firmware(bp->firmware);
7436
7437         return rc;
7438 }
7439
7440
7441 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7442                                     const struct pci_device_id *ent)
7443 {
7444         struct net_device *dev = NULL;
7445         struct bnx2x *bp;
7446         int pcie_width, pcie_speed;
7447         int rc;
7448
7449         /* dev zeroed in alloc_etherdev_mq() */
7450         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7451         if (!dev) {
7452                 dev_err(&pdev->dev, "Cannot allocate net device\n");
7453                 return -ENOMEM;
7454         }
7455
7456         bp = netdev_priv(dev);
7457         bp->msg_enable = debug;
7458
7459         pci_set_drvdata(pdev, dev);
7460
7461         rc = bnx2x_init_dev(pdev, dev);
7462         if (rc < 0) {
7463                 free_netdev(dev);
7464                 return rc;
7465         }
7466
7467         rc = bnx2x_init_bp(bp);
7468         if (rc)
7469                 goto init_one_exit;
7470
7471         /* Set init arrays */
7472         rc = bnx2x_init_firmware(bp, &pdev->dev);
7473         if (rc) {
7474                 dev_err(&pdev->dev, "Error loading firmware\n");
7475                 goto init_one_exit;
7476         }
7477
7478         rc = register_netdev(dev);
7479         if (rc) {
7480                 dev_err(&pdev->dev, "Cannot register net device\n");
7481                 goto init_one_exit;
7482         }
7483
7484         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7485         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7486                " IRQ %d, ", board_info[ent->driver_data].name,
7487                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7488                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7489                dev->base_addr, bp->pdev->irq);
7490         pr_cont("node addr %pM\n", dev->dev_addr);
7491
7492         return 0;
7493
7494 init_one_exit:
7495         if (bp->regview)
7496                 iounmap(bp->regview);
7497
7498         if (bp->doorbells)
7499                 iounmap(bp->doorbells);
7500
7501         free_netdev(dev);
7502
7503         if (atomic_read(&pdev->enable_cnt) == 1)
7504                 pci_release_regions(pdev);
7505
7506         pci_disable_device(pdev);
7507         pci_set_drvdata(pdev, NULL);
7508
7509         return rc;
7510 }
7511
7512 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7513 {
7514         struct net_device *dev = pci_get_drvdata(pdev);
7515         struct bnx2x *bp;
7516
7517         if (!dev) {
7518                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7519                 return;
7520         }
7521         bp = netdev_priv(dev);
7522
7523         unregister_netdev(dev);
7524
7525         /* Make sure RESET task is not scheduled before continuing */
7526         cancel_delayed_work_sync(&bp->reset_task);
7527
7528         kfree(bp->init_ops_offsets);
7529         kfree(bp->init_ops);
7530         kfree(bp->init_data);
7531         release_firmware(bp->firmware);
7532
7533         if (bp->regview)
7534                 iounmap(bp->regview);
7535
7536         if (bp->doorbells)
7537                 iounmap(bp->doorbells);
7538
7539         free_netdev(dev);
7540
7541         if (atomic_read(&pdev->enable_cnt) == 1)
7542                 pci_release_regions(pdev);
7543
7544         pci_disable_device(pdev);
7545         pci_set_drvdata(pdev, NULL);
7546 }
7547
7548 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7549 {
7550         int i;
7551
7552         bp->state = BNX2X_STATE_ERROR;
7553
7554         bp->rx_mode = BNX2X_RX_MODE_NONE;
7555
7556         bnx2x_netif_stop(bp, 0);
7557         netif_carrier_off(bp->dev);
7558
7559         del_timer_sync(&bp->timer);
7560         bp->stats_state = STATS_STATE_DISABLED;
7561         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7562
7563         /* Release IRQs */
7564         bnx2x_free_irq(bp, false);
7565
7566         if (CHIP_IS_E1(bp)) {
7567                 struct mac_configuration_cmd *config =
7568                                                 bnx2x_sp(bp, mcast_config);
7569
7570                 for (i = 0; i < config->hdr.length; i++)
7571                         CAM_INVALIDATE(config->config_table[i]);
7572         }
7573
7574         /* Free SKBs, SGEs, TPA pool and driver internals */
7575         bnx2x_free_skbs(bp);
7576         for_each_queue(bp, i)
7577                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7578         for_each_queue(bp, i)
7579                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7580         bnx2x_free_mem(bp);
7581
7582         bp->state = BNX2X_STATE_CLOSED;
7583
7584         return 0;
7585 }
7586
7587 static void bnx2x_eeh_recover(struct bnx2x *bp)
7588 {
7589         u32 val;
7590
7591         mutex_init(&bp->port.phy_mutex);
7592
7593         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7594         bp->link_params.shmem_base = bp->common.shmem_base;
7595         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7596
7597         if (!bp->common.shmem_base ||
7598             (bp->common.shmem_base < 0xA0000) ||
7599             (bp->common.shmem_base >= 0xC0000)) {
7600                 BNX2X_DEV_INFO("MCP not active\n");
7601                 bp->flags |= NO_MCP_FLAG;
7602                 return;
7603         }
7604
7605         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7606         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7607                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7608                 BNX2X_ERR("BAD MCP validity signature\n");
7609
7610         if (!BP_NOMCP(bp)) {
7611                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7612                               & DRV_MSG_SEQ_NUMBER_MASK);
7613                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7614         }
7615 }
7616
7617 /**
7618  * bnx2x_io_error_detected - called when PCI error is detected
7619  * @pdev: Pointer to PCI device
7620  * @state: The current pci connection state
7621  *
7622  * This function is called after a PCI bus error affecting
7623  * this device has been detected.
7624  */
7625 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7626                                                 pci_channel_state_t state)
7627 {
7628         struct net_device *dev = pci_get_drvdata(pdev);
7629         struct bnx2x *bp = netdev_priv(dev);
7630
7631         rtnl_lock();
7632
7633         netif_device_detach(dev);
7634
7635         if (state == pci_channel_io_perm_failure) {
7636                 rtnl_unlock();
7637                 return PCI_ERS_RESULT_DISCONNECT;
7638         }
7639
7640         if (netif_running(dev))
7641                 bnx2x_eeh_nic_unload(bp);
7642
7643         pci_disable_device(pdev);
7644
7645         rtnl_unlock();
7646
7647         /* Request a slot reset */
7648         return PCI_ERS_RESULT_NEED_RESET;
7649 }
7650
7651 /**
7652  * bnx2x_io_slot_reset - called after the PCI bus has been reset
7653  * @pdev: Pointer to PCI device
7654  *
7655  * Restart the card from scratch, as if from a cold-boot.
7656  */
7657 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7658 {
7659         struct net_device *dev = pci_get_drvdata(pdev);
7660         struct bnx2x *bp = netdev_priv(dev);
7661
7662         rtnl_lock();
7663
7664         if (pci_enable_device(pdev)) {
7665                 dev_err(&pdev->dev,
7666                         "Cannot re-enable PCI device after reset\n");
7667                 rtnl_unlock();
7668                 return PCI_ERS_RESULT_DISCONNECT;
7669         }
7670
7671         pci_set_master(pdev);
7672         pci_restore_state(pdev);
7673
7674         if (netif_running(dev))
7675                 bnx2x_set_power_state(bp, PCI_D0);
7676
7677         rtnl_unlock();
7678
7679         return PCI_ERS_RESULT_RECOVERED;
7680 }
7681
7682 /**
7683  * bnx2x_io_resume - called when traffic can start flowing again
7684  * @pdev: Pointer to PCI device
7685  *
7686  * This callback is called when the error recovery driver tells us that
7687  * it's OK to resume normal operation.
7688  */
7689 static void bnx2x_io_resume(struct pci_dev *pdev)
7690 {
7691         struct net_device *dev = pci_get_drvdata(pdev);
7692         struct bnx2x *bp = netdev_priv(dev);
7693
7694         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7695                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7696                 return;
7697         }
7698
7699         rtnl_lock();
7700
7701         bnx2x_eeh_recover(bp);
7702
7703         if (netif_running(dev))
7704                 bnx2x_nic_load(bp, LOAD_NORMAL);
7705
7706         netif_device_attach(dev);
7707
7708         rtnl_unlock();
7709 }
7710
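/* AER/EEH flow: error_detected() detaches the netdev and requests a slot
 * reset, slot_reset() re-enables and restores the device, and resume()
 * reloads the NIC once the core reports that traffic may flow again.
 */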
7711 static struct pci_error_handlers bnx2x_err_handler = {
7712         .error_detected = bnx2x_io_error_detected,
7713         .slot_reset     = bnx2x_io_slot_reset,
7714         .resume         = bnx2x_io_resume,
7715 };
7716
7717 static struct pci_driver bnx2x_pci_driver = {
7718         .name        = DRV_MODULE_NAME,
7719         .id_table    = bnx2x_pci_tbl,
7720         .probe       = bnx2x_init_one,
7721         .remove      = __devexit_p(bnx2x_remove_one),
7722         .suspend     = bnx2x_suspend,
7723         .resume      = bnx2x_resume,
7724         .err_handler = &bnx2x_err_handler,
7725 };
7726
7727 static int __init bnx2x_init(void)
7728 {
7729         int ret;
7730
7731         pr_info("%s", version);
7732
7733         bnx2x_wq = create_singlethread_workqueue("bnx2x");
7734         if (bnx2x_wq == NULL) {
7735                 pr_err("Cannot create workqueue\n");
7736                 return -ENOMEM;
7737         }
7738
7739         ret = pci_register_driver(&bnx2x_pci_driver);
7740         if (ret) {
7741                 pr_err("Cannot register driver\n");
7742                 destroy_workqueue(bnx2x_wq);
7743         }
7744         return ret;
7745 }
7746
7747 static void __exit bnx2x_cleanup(void)
7748 {
7749         pci_unregister_driver(&bnx2x_pci_driver);
7750
7751         destroy_workqueue(bnx2x_wq);
7752 }
7753
7754 module_init(bnx2x_init);
7755 module_exit(bnx2x_cleanup);
7756
7757 #ifdef BCM_CNIC
7758
7759 /* count denotes the number of new completions we have seen */
7760 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7761 {
7762         struct eth_spe *spe;
7763
7764 #ifdef BNX2X_STOP_ON_ERROR
7765         if (unlikely(bp->panic))
7766                 return;
7767 #endif
7768
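        /* Drop the acknowledged completions from the pending count, then
         * drain queued kwqes from the circular cnic_kwq buffer into the SPQ
         * until max_kwqe_pending slowpath entries are in flight.
         */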
7769         spin_lock_bh(&bp->spq_lock);
7770         bp->cnic_spq_pending -= count;
7771
7772         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7773              bp->cnic_spq_pending++) {
7774
7775                 if (!bp->cnic_kwq_pending)
7776                         break;
7777
7778                 spe = bnx2x_sp_get_next(bp);
7779                 *spe = *bp->cnic_kwq_cons;
7780
7781                 bp->cnic_kwq_pending--;
7782
7783                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7784                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7785
7786                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7787                         bp->cnic_kwq_cons = bp->cnic_kwq;
7788                 else
7789                         bp->cnic_kwq_cons++;
7790         }
7791         bnx2x_sp_prod_update(bp);
7792         spin_unlock_bh(&bp->spq_lock);
7793 }
7794
7795 static int bnx2x_cnic_sp_queue(struct net_device *dev,
7796                                struct kwqe_16 *kwqes[], u32 count)
7797 {
7798         struct bnx2x *bp = netdev_priv(dev);
7799         int i;
7800
7801 #ifdef BNX2X_STOP_ON_ERROR
7802         if (unlikely(bp->panic))
7803                 return -EIO;
7804 #endif
7805
7806         spin_lock_bh(&bp->spq_lock);
7807
7808         for (i = 0; i < count; i++) {
7809                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7810
7811                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7812                         break;
7813
7814                 *bp->cnic_kwq_prod = *spe;
7815
7816                 bp->cnic_kwq_pending++;
7817
7818                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7819                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
7820                    spe->data.mac_config_addr.hi,
7821                    spe->data.mac_config_addr.lo,
7822                    bp->cnic_kwq_pending);
7823
7824                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7825                         bp->cnic_kwq_prod = bp->cnic_kwq;
7826                 else
7827                         bp->cnic_kwq_prod++;
7828         }
7829
7830         spin_unlock_bh(&bp->spq_lock);
7831
7832         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7833                 bnx2x_cnic_sp_post(bp, 0);
7834
7835         return i;
7836 }
7837
7838 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7839 {
7840         struct cnic_ops *c_ops;
7841         int rc = 0;
7842
7843         mutex_lock(&bp->cnic_mutex);
7844         c_ops = bp->cnic_ops;
7845         if (c_ops)
7846                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7847         mutex_unlock(&bp->cnic_mutex);
7848
7849         return rc;
7850 }
7851
7852 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7853 {
7854         struct cnic_ops *c_ops;
7855         int rc = 0;
7856
7857         rcu_read_lock();
7858         c_ops = rcu_dereference(bp->cnic_ops);
7859         if (c_ops)
7860                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7861         rcu_read_unlock();
7862
7863         return rc;
7864 }
7865
7866 /*
7867  * for commands that have no data
7868  */
7869 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7870 {
7871         struct cnic_ctl_info ctl = {0};
7872
7873         ctl.cmd = cmd;
7874
7875         return bnx2x_cnic_ctl_send(bp, &ctl);
7876 }
7877
7878 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7879 {
7880         struct cnic_ctl_info ctl;
7881
7882         /* first we tell CNIC and only then we count this as a completion */
7883         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7884         ctl.data.comp.cid = cid;
7885
7886         bnx2x_cnic_ctl_send_bh(bp, &ctl);
7887         bnx2x_cnic_sp_post(bp, 1);
7888 }
7889
7890 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7891 {
7892         struct bnx2x *bp = netdev_priv(dev);
7893         int rc = 0;
7894
7895         switch (ctl->cmd) {
7896         case DRV_CTL_CTXTBL_WR_CMD: {
7897                 u32 index = ctl->data.io.offset;
7898                 dma_addr_t addr = ctl->data.io.dma_addr;
7899
7900                 bnx2x_ilt_wr(bp, index, addr);
7901                 break;
7902         }
7903
7904         case DRV_CTL_COMPLETION_CMD: {
7905                 int count = ctl->data.comp.comp_count;
7906
7907                 bnx2x_cnic_sp_post(bp, count);
7908                 break;
7909         }
7910
7911         /* rtnl_lock is held.  */
7912         case DRV_CTL_START_L2_CMD: {
7913                 u32 cli = ctl->data.ring.client_id;
7914
7915                 bp->rx_mode_cl_mask |= (1 << cli);
7916                 bnx2x_set_storm_rx_mode(bp);
7917                 break;
7918         }
7919
7920         /* rtnl_lock is held.  */
7921         case DRV_CTL_STOP_L2_CMD: {
7922                 u32 cli = ctl->data.ring.client_id;
7923
7924                 bp->rx_mode_cl_mask &= ~(1 << cli);
7925                 bnx2x_set_storm_rx_mode(bp);
7926                 break;
7927         }
7928
7929         default:
7930                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7931                 rc = -EINVAL;
7932         }
7933
7934         return rc;
7935 }
7936
7937 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7938 {
7939         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7940
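        /* irq_arr[0] carries CNIC's own status block (driven by MSI-X
         * vector 1 when MSI-X is in use), irq_arr[1] the default status
         * block.
         */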
7941         if (bp->flags & USING_MSIX_FLAG) {
7942                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7943                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7944                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7945         } else {
7946                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7947                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7948         }
7949         cp->irq_arr[0].status_blk = bp->cnic_sb;
7950         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7951         cp->irq_arr[1].status_blk = bp->def_status_blk;
7952         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7953
7954         cp->num_irq = 2;
7955 }
7956
7957 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7958                                void *data)
7959 {
7960         struct bnx2x *bp = netdev_priv(dev);
7961         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7962
7963         if (ops == NULL)
7964                 return -EINVAL;
7965
7966         if (atomic_read(&bp->intr_sem) != 0)
7967                 return -EBUSY;
7968
7969         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7970         if (!bp->cnic_kwq)
7971                 return -ENOMEM;
7972
7973         bp->cnic_kwq_cons = bp->cnic_kwq;
7974         bp->cnic_kwq_prod = bp->cnic_kwq;
7975         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
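        /* cnic_kwq is treated as a circular buffer: prod and cons start at
         * the base and wrap back to it once they reach cnic_kwq_last (see
         * bnx2x_cnic_sp_queue() and bnx2x_cnic_sp_post()).
         */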
7976
7977         bp->cnic_spq_pending = 0;
7978         bp->cnic_kwq_pending = 0;
7979
7980         bp->cnic_data = data;
7981
7982         cp->num_irq = 0;
7983         cp->drv_state = CNIC_DRV_STATE_REGD;
7984
7985         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7986
7987         bnx2x_setup_cnic_irq_info(bp);
7988         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7989         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7990         rcu_assign_pointer(bp->cnic_ops, ops);
7991
7992         return 0;
7993 }
7994
7995 static int bnx2x_unregister_cnic(struct net_device *dev)
7996 {
7997         struct bnx2x *bp = netdev_priv(dev);
7998         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7999
8000         mutex_lock(&bp->cnic_mutex);
8001         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8002                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8003                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8004         }
8005         cp->drv_state = 0;
8006         rcu_assign_pointer(bp->cnic_ops, NULL);
8007         mutex_unlock(&bp->cnic_mutex);
8008         synchronize_rcu();
8009         kfree(bp->cnic_kwq);
8010         bp->cnic_kwq = NULL;
8011
8012         return 0;
8013 }
8014
8015 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8016 {
8017         struct bnx2x *bp = netdev_priv(dev);
8018         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8019
8020         cp->drv_owner = THIS_MODULE;
8021         cp->chip_id = CHIP_ID(bp);
8022         cp->pdev = bp->pdev;
8023         cp->io_base = bp->regview;
8024         cp->io_base2 = bp->doorbells;
8025         cp->max_kwqe_pending = 8;
8026         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
8027         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8028         cp->ctx_tbl_len = CNIC_ILT_LINES;
8029         cp->starting_cid = BCM_CNIC_CID_START;
8030         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8031         cp->drv_ctl = bnx2x_drv_ctl;
8032         cp->drv_register_cnic = bnx2x_register_cnic;
8033         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8034
8035         return cp;
8036 }
8037 EXPORT_SYMBOL(bnx2x_cnic_probe);
8038
8039 #endif /* BCM_CNIC */
8040