]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/bnx2x/bnx2x_main.c
bnx2x: Add dual-media changes
[net-next-2.6.git] / drivers / net / bnx2x / bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
53
54 #define BNX2X_MAIN
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_cmn.h"
59
60
61 #include <linux/firmware.h>
62 #include "bnx2x_fw_file_hdr.h"
63 /* FW files */
64 #define FW_FILE_VERSION                                 \
65         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
66         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
67         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
68         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69 #define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
70 #define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"
71
72 /* Time in jiffies before concluding the transmitter is hung */
73 #define TX_TIMEOUT              (5*HZ)
74
/* Module banner printed once at load time (see version string usage). */
75 static char version[] __devinitdata =
76         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
77         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78
79 MODULE_AUTHOR("Eliezer Tamir");
80 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
81 MODULE_LICENSE("GPL");
82 MODULE_VERSION(DRV_MODULE_VERSION);
83 MODULE_FIRMWARE(FW_FILE_NAME_E1);
84 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
85
/* Module parameters: all are load-time only (perm 0 - not visible/writable
 * via sysfs after load). */
86 static int multi_mode = 1;
87 module_param(multi_mode, int, 0);
88 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89                              "(0 Disable; 1 Enable (default))");
90
91 static int num_queues;
92 module_param(num_queues, int, 0);
93 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94                                 " (default is as a number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
103                                 "(1 INT#x; 2 MSI)");
104
105 static int dropless_fc;
106 module_param(dropless_fc, int, 0);
107 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
108
109 static int poll;
110 module_param(poll, int, 0);
111 MODULE_PARM_DESC(poll, " Use polling (for debug)");
112
113 static int mrrs = -1;
114 module_param(mrrs, int, 0);
115 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
116
117 static int debug;
118 module_param(debug, int, 0);
119 MODULE_PARM_DESC(debug, " Default debug msglevel");
120
/* Driver-wide workqueue; hosts the slowpath task (flushed in
 * bnx2x_int_disable_sync below). */
121 static struct workqueue_struct *bnx2x_wq;
122
123 enum bnx2x_board_type {
124         BCM57710 = 0,
125         BCM57711 = 1,
126         BCM57711E = 2,
127 };
128
129 /* indexed by board_type, above */
130 static struct {
131         char *name;
132 } board_info[] __devinitdata = {
133         { "Broadcom NetXtreme II BCM57710 XGb" },
134         { "Broadcom NetXtreme II BCM57711 XGb" },
135         { "Broadcom NetXtreme II BCM57711E XGb" }
136 };
137
138
/* PCI IDs handled by this driver; driver_data is the board_type index. */
139 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
143         { 0 }
144 };
145
146 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148 /****************************************************************************
149 * General service functions
150 ****************************************************************************/
151
152 /* used only at init
153  * locking is done by mcp
154  */
155 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156 {
        /* Indirect GRC register write via PCI config space: point the
         * GRC address window at 'addr', write 'val' through it, then
         * restore the window to a benign offset (vendor id) so later
         * config-space accesses don't land on an arbitrary GRC address. */
157         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160                                PCICFG_VENDOR_ID_OFFSET);
161 }
162
/* Indirect GRC register read via PCI config space; mirror image of
 * bnx2x_reg_wr_ind(): select address window, read data, restore window. */
163 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164 {
165         u32 val;
166
167         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170                                PCICFG_VENDOR_ID_OFFSET);
171
172         return val;
173 }
174
/* Per-channel DMAE "GO" command registers, indexed by DMAE channel number;
 * writing 1 to an entry kicks that channel (see bnx2x_post_dmae). */
175 const u32 dmae_reg_go_c[] = {
176         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180 };
181
182 /* copy command into DMAE command memory and set DMAE command go */
183 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
184 {
185         u32 cmd_offset;
186         int i;
187
        /* Command slot 'idx' lives at a fixed stride (sizeof(dmae_command))
         * inside the device's DMAE command memory. */
188         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        /* Copy the command into the device one dword at a time. */
189         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
190                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
191
192                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
193                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
194         }
        /* Kick the channel: writing 1 to its GO register starts the DMA. */
195         REG_WR(bp, dmae_reg_go_c[idx], 1);
196 }
197
/* DMA 'len32' dwords from host memory at 'dma_addr' to GRC address
 * 'dst_addr' using the DMAE engine, and busy-wait for completion. */
198 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
199                       u32 len32)
200 {
201         struct dmae_command dmae;
202         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
203         int cnt = 200;
204
        /* DMAE engine not initialized yet (early init path): fall back to
         * slow indirect writes of the slowpath wb_data buffer (assumes the
         * caller staged the data there - NOTE(review): confirm vs callers). */
205         if (!bp->dmae_ready) {
206                 u32 *data = bnx2x_sp(bp, wb_data[0]);
207
208                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
209                    "  using indirect\n", dst_addr, len32);
210                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
211                 return;
212         }
213
214         memset(&dmae, 0, sizeof(struct dmae_command));
215
        /* Build opcode: host (PCI) -> device (GRC) copy, completion written
         * back to host memory, byte/dword swap chosen per host endianness,
         * tagged with our port and E1H virtual function number. */
216         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
219 #ifdef __BIG_ENDIAN
220                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
221 #else
222                        DMAE_CMD_ENDIANITY_DW_SWAP |
223 #endif
224                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
225                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
226         dmae.src_addr_lo = U64_LO(dma_addr);
227         dmae.src_addr_hi = U64_HI(dma_addr);
        /* GRC destination is a dword address, hence the >> 2. */
228         dmae.dst_addr_lo = dst_addr >> 2;
229         dmae.dst_addr_hi = 0;
230         dmae.len = len32;
        /* Completion value lands in the shared slowpath wb_comp word. */
231         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
232         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
233         dmae.comp_val = DMAE_COMP_VAL;
234
235         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
236            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
237                     "dst_addr [%x:%08x (%08x)]\n"
238            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
239            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
240            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
241            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
242         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
245
        /* Serialize DMAE users - wb_comp is a single shared buffer. */
246         mutex_lock(&bp->dmae_mutex);
247
        /* Arm the completion word; the engine sets it to DMAE_COMP_VAL. */
248         *wb_comp = 0;
249
250         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
251
252         udelay(5);
253
        /* Poll for completion; ~200 iterations before declaring a timeout.
         * On timeout we log and fall through - the write may be lost. */
254         while (*wb_comp != DMAE_COMP_VAL) {
255                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
256
257                 if (!cnt) {
258                         BNX2X_ERR("DMAE timeout!\n");
259                         break;
260                 }
261                 cnt--;
262                 /* adjust delay for emulation/FPGA */
263                 if (CHIP_REV_IS_SLOW(bp))
264                         msleep(100);
265                 else
266                         udelay(5);
267         }
268
269         mutex_unlock(&bp->dmae_mutex);
270 }
271
/* DMA 'len32' dwords from GRC address 'src_addr' into the slowpath
 * wb_data buffer using the DMAE engine, and busy-wait for completion. */
272 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
273 {
274         struct dmae_command dmae;
275         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
276         int cnt = 200;
277
        /* DMAE not initialized yet: read the registers one by one through
         * the indirect PCI config window instead. */
278         if (!bp->dmae_ready) {
279                 u32 *data = bnx2x_sp(bp, wb_data[0]);
280                 int i;
281
282                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
283                    "  using indirect\n", src_addr, len32);
284                 for (i = 0; i < len32; i++)
285                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
286                 return;
287         }
288
289         memset(&dmae, 0, sizeof(struct dmae_command));
290
        /* Device (GRC) -> host (PCI) copy; otherwise the mirror image of
         * the opcode built in bnx2x_write_dmae(). */
291         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
292                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
294 #ifdef __BIG_ENDIAN
295                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
296 #else
297                        DMAE_CMD_ENDIANITY_DW_SWAP |
298 #endif
299                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        /* GRC source is a dword address, hence the >> 2. */
301         dmae.src_addr_lo = src_addr >> 2;
302         dmae.src_addr_hi = 0;
303         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
305         dmae.len = len32;
306         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308         dmae.comp_val = DMAE_COMP_VAL;
309
310         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
312                     "dst_addr [%x:%08x (%08x)]\n"
313            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
314            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
317
        /* Serialize DMAE users - wb_data/wb_comp are shared buffers. */
318         mutex_lock(&bp->dmae_mutex);
319
        /* Clear the landing buffer and arm the completion word. */
320         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
321         *wb_comp = 0;
322
323         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
324
325         udelay(5);
326
        /* Poll for completion with bounded retries, as in the write path. */
327         while (*wb_comp != DMAE_COMP_VAL) {
328
329                 if (!cnt) {
330                         BNX2X_ERR("DMAE timeout!\n");
331                         break;
332                 }
333                 cnt--;
334                 /* adjust delay for emulation/FPGA */
335                 if (CHIP_REV_IS_SLOW(bp))
336                         msleep(100);
337                 else
338                         udelay(5);
339         }
340         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
343
344         mutex_unlock(&bp->dmae_mutex);
345 }
346
347 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
348                                u32 addr, u32 len)
349 {
350         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
351         int offset = 0;
352
353         while (len > dmae_wr_max) {
354                 bnx2x_write_dmae(bp, phys_addr + offset,
355                                  addr + offset, dmae_wr_max);
356                 offset += dmae_wr_max * 4;
357                 len -= dmae_wr_max;
358         }
359
360         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
361 }
362
363 /* used only for slowpath so not inlined */
364 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
365 {
366         u32 wb_write[2];
367
368         wb_write[0] = val_hi;
369         wb_write[1] = val_lo;
370         REG_WR_DMAE(bp, reg, wb_write, 2);
371 }
372
373 #ifdef USE_WB_RD
/* Read a 64-bit wide-bus register as a {hi, lo} dword pair via DMAE.
 * Compiled out unless USE_WB_RD is defined. */
374 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
375 {
376         u32 wb_data[2];
377
378         REG_RD_DMAE(bp, reg, wb_data, 2);
379
380         return HILO_U64(wb_data[0], wb_data[1]);
381 }
382 #endif
383
/* Scan the assert lists of the four STORM microcontrollers (X/T/C/U) and
 * log every valid entry.  Each list entry is 4 dwords; an entry whose
 * first dword equals the "invalid assert opcode" marker terminates that
 * list.  Returns the total number of asserts found. */
384 static int bnx2x_mc_assert(struct bnx2x *bp)
385 {
386         char last_idx;
387         int i, rc = 0;
388         u32 row0, row1, row2, row3;
389
390         /* XSTORM */
391         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
392                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
393         if (last_idx)
394                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
395
396         /* print the asserts */
397         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
398
                /* Entry i occupies 4 consecutive dwords. */
399                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
400                               XSTORM_ASSERT_LIST_OFFSET(i));
401                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
402                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
403                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
405                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
407
408                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
409                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
410                                   " 0x%08x 0x%08x 0x%08x\n",
411                                   i, row3, row2, row1, row0);
412                         rc++;
413                 } else {
                        /* First invalid entry terminates the list. */
414                         break;
415                 }
416         }
417
418         /* TSTORM */
419         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
420                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
421         if (last_idx)
422                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
423
424         /* print the asserts */
425         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
426
427                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
428                               TSTORM_ASSERT_LIST_OFFSET(i));
429                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
430                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
431                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
433                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
435
436                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
437                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
438                                   " 0x%08x 0x%08x 0x%08x\n",
439                                   i, row3, row2, row1, row0);
440                         rc++;
441                 } else {
442                         break;
443                 }
444         }
445
446         /* CSTORM */
447         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
448                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
449         if (last_idx)
450                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
451
452         /* print the asserts */
453         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
454
455                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
456                               CSTORM_ASSERT_LIST_OFFSET(i));
457                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
458                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
459                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
461                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
463
464                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
465                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
466                                   " 0x%08x 0x%08x 0x%08x\n",
467                                   i, row3, row2, row1, row0);
468                         rc++;
469                 } else {
470                         break;
471                 }
472         }
473
474         /* USTORM */
475         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
476                            USTORM_ASSERT_LIST_INDEX_OFFSET);
477         if (last_idx)
478                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
479
480         /* print the asserts */
481         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
482
483                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
484                               USTORM_ASSERT_LIST_OFFSET(i));
485                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
486                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
487                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
488                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
489                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
490                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
491
492                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
493                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
494                                   " 0x%08x 0x%08x 0x%08x\n",
495                                   i, row3, row2, row1, row0);
496                         rc++;
497                 } else {
498                         break;
499                 }
500         }
501
502         return rc;
503 }
504
/* Dump the MCP firmware's trace buffer (in its scratchpad RAM) to the
 * kernel log.  The buffer is circular: the current write position ('mark')
 * is printed first up to the buffer end, then the start up to 'mark'. */
505 static void bnx2x_fw_dump(struct bnx2x *bp)
506 {
507         u32 addr;
508         u32 mark, offset;
509         __be32 data[9];
510         int word;
511
512         if (BP_NOMCP(bp)) {
513                 BNX2X_ERR("NO MCP - can not dump\n");
514                 return;
515         }
516
        /* The trace mark word sits just below shmem; translate its value
         * into a scratchpad address (dword-aligned up).  NOTE(review): the
         * 0x0800 / 0x08000000 constants are MCP layout specifics - confirm
         * against the shmem definition if changing. */
517         addr = bp->common.shmem_base - 0x0800 + 4;
518         mark = REG_RD(bp, addr);
519         mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
520         pr_err("begin fw dump (mark 0x%x)\n", mark);
521
        /* Empty pr_err() emits the log prefix so the pr_cont() pieces
         * below attach to a properly-prefixed line. */
522         pr_err("");
        /* Newer half of the circular buffer: from mark to the buffer end. */
523         for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
524                 for (word = 0; word < 8; word++)
525                         data[word] = htonl(REG_RD(bp, offset + 4*word));
                /* NUL-terminate so the 32 bytes print as a string. */
526                 data[8] = 0x0;
527                 pr_cont("%s", (char *)data);
528         }
        /* Older half: from the buffer start up to mark. */
529         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
530                 for (word = 0; word < 8; word++)
531                         data[word] = htonl(REG_RD(bp, offset + 4*word));
532                 data[8] = 0x0;
533                 pr_cont("%s", (char *)data);
534         }
535         pr_err("end of fw dump\n");
536 }
537
/* Dump driver and ring state to the log after a fatal error: indices,
 * per-queue Rx/Tx state, ring contents around the current positions, the
 * firmware trace, and any STORM asserts.  Statistics are disabled first
 * so the state being dumped stops changing. */
538 void bnx2x_panic_dump(struct bnx2x *bp)
539 {
540         int i;
541         u16 j, start, end;
542
543         bp->stats_state = STATS_STATE_DISABLED;
544         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
545
546         BNX2X_ERR("begin crash dump -----------------\n");
547
548         /* Indices */
549         /* Common */
550         BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
551                   "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
552                   "  spq_prod_idx(0x%x)\n",
553                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
554                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
555
556         /* Rx */
557         for_each_queue(bp, i) {
558                 struct bnx2x_fastpath *fp = &bp->fp[i];
559
560                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
561                           "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
562                           "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
563                           i, fp->rx_bd_prod, fp->rx_bd_cons,
564                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
565                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
567                           "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
568                           fp->rx_sge_prod, fp->last_max_sge,
569                           le16_to_cpu(fp->fp_u_idx),
570                           fp->status_blk->u_status_block.status_block_index);
571         }
572
573         /* Tx */
574         for_each_queue(bp, i) {
575                 struct bnx2x_fastpath *fp = &bp->fp[i];
576
577                 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
578                           "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
579                           "  *tx_cons_sb(0x%x)\n",
580                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582                 BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
583                           "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
584                           fp->status_blk->c_status_block.status_block_index,
585                           fp->tx_db.data.prod);
586         }
587
588         /* Rings */
589         /* Rx */
590         for_each_queue(bp, i) {
591                 struct bnx2x_fastpath *fp = &bp->fp[i];
592
                /* Dump a window around the current consumer: 10 entries
                 * behind it through 503 ahead. */
593                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
594                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
595                 for (j = start; j != end; j = RX_BD(j + 1)) {
596                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
597                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
598
599                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
600                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
601                 }
602
603                 start = RX_SGE(fp->rx_sge_prod);
604                 end = RX_SGE(fp->last_max_sge);
605                 for (j = start; j != end; j = RX_SGE(j + 1)) {
606                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
607                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
608
609                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
610                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
611                 }
612
613                 start = RCQ_BD(fp->rx_comp_cons - 10);
614                 end = RCQ_BD(fp->rx_comp_cons + 503);
615                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
616                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
617
618                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
619                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
620                 }
621         }
622
623         /* Tx */
624         for_each_queue(bp, i) {
625                 struct bnx2x_fastpath *fp = &bp->fp[i];
626
627                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
628                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
629                 for (j = start; j != end; j = TX_BD(j + 1)) {
630                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
631
632                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
633                                   i, j, sw_bd->skb, sw_bd->first_bd);
634                 }
635
636                 start = TX_BD(fp->tx_bd_cons - 10);
637                 end = TX_BD(fp->tx_bd_cons + 254);
638                 for (j = start; j != end; j = TX_BD(j + 1)) {
639                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
640
641                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
642                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643                 }
644         }
645
        /* Finish with the MCP firmware trace and any STORM asserts. */
646         bnx2x_fw_dump(bp);
647         bnx2x_mc_assert(bp);
648         BNX2X_ERR("end crash dump -----------------\n");
649 }
650
/* Enable host-coalescing interrupts for this port, configured for the
 * interrupt mode in use (MSI-X, MSI, or INTx), then program the
 * leading/trailing edge attention registers on E1H chips. */
651 void bnx2x_int_enable(struct bnx2x *bp)
652 {
653         int port = BP_PORT(bp);
        /* Each port has its own HC config register. */
654         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
655         u32 val = REG_RD(bp, addr);
656         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
657         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
658
659         if (msix) {
                /* MSI-X: per-vector delivery - no single ISR, no INTx line. */
660                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
661                          HC_CONFIG_0_REG_INT_LINE_EN_0);
662                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
664         } else if (msi) {
                /* MSI: single ISR over the MSI message. */
665                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
666                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
668                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
669         } else {
                /* INTx: first write with the MSI/MSI-X bit also set, then
                 * clear it for the final write below.  NOTE(review): this
                 * two-step sequence looks like a required HW programming
                 * order - confirm before simplifying. */
670                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
671                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
672                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
673                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
674
675                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
676                    val, port, addr);
677
678                 REG_WR(bp, addr, val);
679
680                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
681         }
682
683         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
684            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
685
686         REG_WR(bp, addr, val);
687         /*
688          * Ensure that HC_CONFIG is written before leading/trailing edge config
689          */
690         mmiowb();
691         barrier();
692
693         if (CHIP_IS_E1H(bp)) {
694                 /* init leading/trailing edge */
695                 if (IS_E1HMF(bp)) {
                        /* Multi-function: unmask only this VN's bit ... */
696                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
697                         if (bp->port.pmf)
698                                 /* enable nig and gpio3 attention */
699                                 val |= 0x1100;
700                 } else
701                         val = 0xffff;
702
703                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
704                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
705         }
706
707         /* Make sure that interrupts are indeed enabled from here on */
708         mmiowb();
709 }
710
/* Disable all host-coalescing interrupt sources for this port and
 * verify the write took effect by reading the register back. */
711 static void bnx2x_int_disable(struct bnx2x *bp)
712 {
713         int port = BP_PORT(bp);
714         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715         u32 val = REG_RD(bp, addr);
716
        /* Clear every interrupt-enable bit: single ISR, MSI/MSI-X,
         * INTx line, and attention bits. */
717         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
718                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
719                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
720                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
721
722         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
723            val, port, addr);
724
725         /* flush all outstanding writes */
726         mmiowb();
727
728         REG_WR(bp, addr, val);
        /* Read-back verification - a mismatch indicates an IGU problem. */
729         if (REG_RD(bp, addr) != val)
730                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731 }
732
/* Disable interrupt processing and wait until every in-flight ISR and
 * the slowpath task have finished.  If 'disable_hw' is set, also mask
 * interrupts at the hardware level. */
733 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
734 {
735         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
736         int i, offset;
737
738         /* disable interrupt handling */
739         atomic_inc(&bp->intr_sem);
740         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
741
742         if (disable_hw)
743                 /* prevent the HW from sending interrupts */
744                 bnx2x_int_disable(bp);
745
746         /* make sure all ISRs are done */
747         if (msix) {
                /* Vector 0 is the slowpath interrupt; queue vectors start
                 * after it (and after the CNIC vector when compiled in). */
748                 synchronize_irq(bp->msix_table[0].vector);
749                 offset = 1;
750 #ifdef BCM_CNIC
751                 offset++;
752 #endif
753                 for_each_queue(bp, i)
754                         synchronize_irq(bp->msix_table[i + offset].vector);
755         } else
756                 synchronize_irq(bp->pdev->irq);
757
758         /* make sure sp_task is not running */
759         cancel_delayed_work(&bp->sp_task);
760         flush_workqueue(bnx2x_wq);
761 }
762
763 /* fast path */
764
765 /*
766  * General service functions
767  */
768
769 /* Return true if succeeded to acquire the lock */
770 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
771 {
772         u32 lock_status;
773         u32 resource_bit = (1 << resource);
774         int func = BP_FUNC(bp);
775         u32 hw_lock_control_reg;
776
777         DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
778
779         /* Validating that the resource is within range */
780         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
781                 DP(NETIF_MSG_HW,
782                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
784                 return false;
785         }
786
787         if (func <= 5)
788                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
789         else
790                 hw_lock_control_reg =
791                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
792
793         /* Try to acquire the lock */
794         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
795         lock_status = REG_RD(bp, hw_lock_control_reg);
796         if (lock_status & resource_bit)
797                 return true;
798
799         DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
800         return false;
801 }
802
803
804 #ifdef BCM_CNIC
805 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
806 #endif
807
/* Slow-path (ramrod) completion handler, invoked from the RX completion
 * path when a ramrod CQE is seen.  Advances the relevant state machine:
 * per-fastpath state for non-leading queues, global bp->state for the
 * leading queue.  Also returns the consumed slow-path queue credit.
 *
 * @fp:     fastpath on whose completion queue the ramrod CQE arrived
 * @rr_cqe: the ramrod completion CQE
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* a ramrod has completed - one more SPQ slot is free */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading queue: transition the per-fastpath state */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: transition the global device state */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb(); /* make the decrement visible to pollers */
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb(); /* make the decrement visible to pollers */
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
891
/* Legacy (INTx) interrupt handler.
 *
 * Acks the interrupt, schedules NAPI for every fastpath whose
 * status-block bit is set in the ack'ed status word, forwards CNIC
 * events when BCM_CNIC is compiled in, and queues the slow-path task
 * for the default status block (bit 0).
 *
 * Returns IRQ_NONE when the (possibly shared) interrupt was not ours,
 * IRQ_HANDLED otherwise.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each fastpath owns one status bit above bit 0 */
		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	/* CNIC is also triggered on the default SB bit (0x1) */
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* bit 0 belongs to the default status block - slow path work */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
963
964 /* end of fast path */
965
966
967 /* Link */
968
969 /*
970  * General service functions
971  */
972
973 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
974 {
975         u32 lock_status;
976         u32 resource_bit = (1 << resource);
977         int func = BP_FUNC(bp);
978         u32 hw_lock_control_reg;
979         int cnt;
980
981         /* Validating that the resource is within range */
982         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
983                 DP(NETIF_MSG_HW,
984                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
985                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
986                 return -EINVAL;
987         }
988
989         if (func <= 5) {
990                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
991         } else {
992                 hw_lock_control_reg =
993                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
994         }
995
996         /* Validating that the resource is not already taken */
997         lock_status = REG_RD(bp, hw_lock_control_reg);
998         if (lock_status & resource_bit) {
999                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1000                    lock_status, resource_bit);
1001                 return -EEXIST;
1002         }
1003
1004         /* Try for 5 second every 5ms */
1005         for (cnt = 0; cnt < 1000; cnt++) {
1006                 /* Try to acquire the lock */
1007                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1008                 lock_status = REG_RD(bp, hw_lock_control_reg);
1009                 if (lock_status & resource_bit)
1010                         return 0;
1011
1012                 msleep(5);
1013         }
1014         DP(NETIF_MSG_HW, "Timeout\n");
1015         return -EAGAIN;
1016 }
1017
1018 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1019 {
1020         u32 lock_status;
1021         u32 resource_bit = (1 << resource);
1022         int func = BP_FUNC(bp);
1023         u32 hw_lock_control_reg;
1024
1025         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1026
1027         /* Validating that the resource is within range */
1028         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1029                 DP(NETIF_MSG_HW,
1030                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1031                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1032                 return -EINVAL;
1033         }
1034
1035         if (func <= 5) {
1036                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1037         } else {
1038                 hw_lock_control_reg =
1039                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1040         }
1041
1042         /* Validating that the resource is currently taken */
1043         lock_status = REG_RD(bp, hw_lock_control_reg);
1044         if (!(lock_status & resource_bit)) {
1045                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1046                    lock_status, resource_bit);
1047                 return -EFAULT;
1048         }
1049
1050         REG_WR(bp, hw_lock_control_reg, resource_bit);
1051         return 0;
1052 }
1053
1054
1055 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1056 {
1057         /* The GPIO should be swapped if swap register is set and active */
1058         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1059                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1060         int gpio_shift = gpio_num +
1061                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1062         u32 gpio_mask = (1 << gpio_shift);
1063         u32 gpio_reg;
1064         int value;
1065
1066         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1067                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1068                 return -EINVAL;
1069         }
1070
1071         /* read GPIO value */
1072         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1073
1074         /* get the requested pin value */
1075         if ((gpio_reg & gpio_mask) == gpio_mask)
1076                 value = 1;
1077         else
1078                 value = 0;
1079
1080         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1081
1082         return value;
1083 }
1084
/* Drive a GPIO pin to one of three modes: output low, output high, or
 * high-Z input (float).  The pin index is port-swapped when the NIG
 * swap strap is active.  The GPIO register is updated under the shared
 * GPIO HW lock.  Returns 0 on success, -EINVAL for a bad GPIO number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: the masked register is written back as-is */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1137
/* Set or clear the interrupt-trigger state of a GPIO pin via the GPIO
 * interrupt register.  The pin index is port-swapped when the NIG swap
 * strap is active; the update is done under the shared GPIO HW lock.
 * Returns 0 on success, -EINVAL for a bad GPIO number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: register is written back unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1183
/* Drive an SPIO pin (shared, not per-port, unlike GPIO) to output low,
 * output high, or high-Z input.  Only SPIO_4..SPIO_7 are accepted.
 * The SPIO register is updated under the shared SPIO HW lock.
 * Returns 0 on success, -EINVAL for an out-of-range SPIO number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: register is written back unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1229
/* Return the link-configuration index of the currently relevant PHY in
 * a dual-media setup.  While the link is up the active PHY is derived
 * from the reported link type (SERDES implies EXT_PHY2 when that PHY
 * supports fibre); otherwise it comes from the configured PHY-selection
 * policy.  The result is corrected for hardware PHY swapping.
 */
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		/* link down: fall back to the configured selection policy */
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
		       sel_phy_idx = EXT_PHY1;
		       break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
		       sel_phy_idx = EXT_PHY2;
		       break;
		}
	}
	/*
	* The selected active PHY is always after swapping (in case PHY
	* swapping is enabled). So when swapping is enabled, we need to reverse
	* the configuration
	*/

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
1268
1269 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1270 {
1271         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1272         switch (bp->link_vars.ieee_fc &
1273                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1274         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1275                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1276                                           ADVERTISED_Pause);
1277                 break;
1278
1279         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1280                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1281                                          ADVERTISED_Pause);
1282                 break;
1283
1284         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1285                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1286                 break;
1287
1288         default:
1289                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1290                                           ADVERTISED_Pause);
1291                 break;
1292         }
1293 }
1294
1295
/* Set up link parameters and initialize the PHY under the PHY lock.
 *
 * @load_mode: LOAD_* mode; LOAD_DIAG forces XGXS loopback at 10G.
 *
 * Returns the bnx2x_phy_init() status, or -EINVAL when the bootcode
 * (MCP) is absent and the link cannot be initialized.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		/* saved so the diag-mode speed override can be undone below */
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* NOTE(review): on slow chip revs the link-up is reported
		 * here directly - presumably no link attention arrives */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the configured speed (may have been forced above) */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1333
1334 void bnx2x_link_set(struct bnx2x *bp)
1335 {
1336         if (!BP_NOMCP(bp)) {
1337                 bnx2x_acquire_phy_lock(bp);
1338                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1339                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1340                 bnx2x_release_phy_lock(bp);
1341
1342                 bnx2x_calc_fc_adv(bp);
1343         } else
1344                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1345 }
1346
1347 static void bnx2x__link_reset(struct bnx2x *bp)
1348 {
1349         if (!BP_NOMCP(bp)) {
1350                 bnx2x_acquire_phy_lock(bp);
1351                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1352                 bnx2x_release_phy_lock(bp);
1353         } else
1354                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1355 }
1356
1357 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1358 {
1359         u8 rc = 0;
1360
1361         if (!BP_NOMCP(bp)) {
1362                 bnx2x_acquire_phy_lock(bp);
1363                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1364                                      is_serdes);
1365                 bnx2x_release_phy_lock(bp);
1366         } else
1367                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1368
1369         return rc;
1370 }
1371
/* Initialize the per-port rate-shaping and fairness parameters from
 * the current line speed.  Fills bp->cmng.rs_vars and
 * bp->cmng.fair_vars; the caller later copies bp->cmng into internal
 * (XSTORM) memory.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1406
1407 /* Calculates the sum of vn_min_rates.
1408    It's needed for further normalizing of the min_rates.
1409    Returns:
1410      sum of vn_min_rates.
1411        or
1412      0 - if all the min_rates are 0.
1413      In the later case fainess algorithm should be deactivated.
1414      If not all min_rates are zero then those that are zeroes will be set to 1.
1415  */
1416 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1417 {
1418         int all_zero = 1;
1419         int port = BP_PORT(bp);
1420         int vn;
1421
1422         bp->vn_weight_sum = 0;
1423         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1424                 int func = 2*vn + port;
1425                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1426                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1427                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1428
1429                 /* Skip hidden vns */
1430                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1431                         continue;
1432
1433                 /* If min rate is zero - set it to 1 */
1434                 if (!vn_min_rate)
1435                         vn_min_rate = DEF_MIN_RATE;
1436                 else
1437                         all_zero = 0;
1438
1439                 bp->vn_weight_sum += vn_min_rate;
1440         }
1441
1442         /* ... only if all min rates are zeros - disable fairness */
1443         if (all_zero) {
1444                 bp->cmng.flags.cmng_enables &=
1445                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1446                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1447                    "  fairness will be disabled\n");
1448         } else
1449                 bp->cmng.flags.cmng_enables |=
1450                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1451 }
1452
/* Program per-VN rate-shaping and fairness parameters into XSTORM
 * internal memory for the given absolute function.  A hidden function
 * gets zero min and max rates.  Fairness credit is only computed when
 * bp->vn_weight_sum is non-zero (i.e. fairness is enabled).
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory - copied one 32-bit word at a time */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
1514
1515
/* This function is called upon link interrupt (attention).
 * It re-reads the link state, updates dropless flow control and BMAC
 * statistics bookkeeping, reports the link only when its status
 * actually changed, and - in E1H multi-function mode - notifies the
 * other functions on the port and re-programs the rate-shaping and
 * fairness configuration when the link is up.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether TX pause is on */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory, one word at a time */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
1589
1590 void bnx2x__link_status_update(struct bnx2x *bp)
1591 {
1592         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1593                 return;
1594
1595         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1596
1597         if (bp->link_vars.link_up)
1598                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1599         else
1600                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1601
1602         bnx2x_calc_vn_weight_sum(bp);
1603
1604         /* indicate link status */
1605         bnx2x_link_report(bp);
1606 }
1607
1608 static void bnx2x_pmf_update(struct bnx2x *bp)
1609 {
1610         int port = BP_PORT(bp);
1611         u32 val;
1612
1613         bp->port.pmf = 1;
1614         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1615
1616         /* enable nig attention */
1617         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1618         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1619         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1620
1621         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1622 }
1623
1624 /* end of Link */
1625
1626 /* slow path */
1627
1628 /*
1629  * General service functions
1630  */
1631
/* Send the MCP (management firmware) a request through the shared-memory
 * mailbox and block until there is a reply.
 * @command: DRV_MSG_CODE_* opcode; @param: opcode-specific argument.
 * Returns the FW_MSG_CODE_* reply, or 0 if the firmware never answered.
 * Serialized by bp->fw_mb_mutex; may sleep.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;	/* sequence number matches request to reply */
	u32 rc = 0;
	u32 cnt = 1;
	/* emulation/FPGA ("slow") chips need a longer per-poll delay */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	/* param must be in place before the header write triggers the MCP */
	SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
1671
1672 static void bnx2x_e1h_disable(struct bnx2x *bp)
1673 {
1674         int port = BP_PORT(bp);
1675
1676         netif_tx_disable(bp->dev);
1677
1678         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1679
1680         netif_carrier_off(bp->dev);
1681 }
1682
1683 static void bnx2x_e1h_enable(struct bnx2x *bp)
1684 {
1685         int port = BP_PORT(bp);
1686
1687         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1688
1689         /* Tx queue should be only reenabled */
1690         netif_tx_wake_all_queues(bp->dev);
1691
1692         /*
1693          * Should not call netif_carrier_on since it will be called if the link
1694          * is up when checking for link state
1695          */
1696 }
1697
/* Recompute the rate-shaping/fairness (min/max bandwidth) configuration
 * for all VNs on this port and, if we are the PMF, notify the sibling
 * functions and push the result to the chip's internal memory.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	/* Per-VN min/max: func = 2*vn + port on E1H */
	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory (one dword at a time) */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
1731
/* Handle a DCC (Dynamic Control Command) event from the MCP: enable or
 * disable this PF and/or re-apply bandwidth allocation, then acknowledge
 * the outcome to the firmware. Bits handled here are cleared from
 * dcc_event; any bit left over is reported as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP: failure if any unhandled bit remains */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
1768
1769 /* must be called under the spq lock */
1770 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1771 {
1772         struct eth_spe *next_spe = bp->spq_prod_bd;
1773
1774         if (bp->spq_prod_bd == bp->spq_last_bd) {
1775                 bp->spq_prod_bd = bp->spq;
1776                 bp->spq_prod_idx = 0;
1777                 DP(NETIF_MSG_TIMER, "end of spq\n");
1778         } else {
1779                 bp->spq_prod_bd++;
1780                 bp->spq_prod_idx++;
1781         }
1782         return next_spe;
1783 }
1784
1785 /* must be called under the spq lock */
1786 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1787 {
1788         int func = BP_FUNC(bp);
1789
1790         /* Make sure that BD data is updated before writing the producer */
1791         wmb();
1792
1793         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1794                bp->spq_prod_idx);
1795         mmiowb();
1796 }
1797
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path (ramrod) entry.
 * @command: ramrod command id; @cid: connection id (port gets encoded in);
 * @data_hi/@data_lo: command payload; @common: mark as a common ramrod.
 * Returns 0 on success, -EBUSY if the SPQ is full, -EIO when panicked.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	/* spq_left is credit-based: a pending completion must exist for
	 * every posted entry, so an empty budget here is a driver bug */
	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	/* producer update (with its own wmb) must be last before unlock */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
1845
1846 /* acquire split MCP access lock register */
1847 static int bnx2x_acquire_alr(struct bnx2x *bp)
1848 {
1849         u32 j, val;
1850         int rc = 0;
1851
1852         might_sleep();
1853         for (j = 0; j < 1000; j++) {
1854                 val = (1UL << 31);
1855                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1856                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1857                 if (val & (1L << 31))
1858                         break;
1859
1860                 msleep(5);
1861         }
1862         if (!(val & (1L << 31))) {
1863                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1864                 rc = -EBUSY;
1865         }
1866
1867         return rc;
1868 }
1869
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* writing 0 to the MCP scratchpad lock register frees the ALR */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
1875
1876 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1877 {
1878         struct host_def_status_block *def_sb = bp->def_status_blk;
1879         u16 rc = 0;
1880
1881         barrier(); /* status block is written to by the chip */
1882         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1883                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1884                 rc |= 1;
1885         }
1886         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1887                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1888                 rc |= 2;
1889         }
1890         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1891                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1892                 rc |= 4;
1893         }
1894         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1895                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1896                 rc |= 8;
1897         }
1898         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1899                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1900                 rc |= 16;
1901         }
1902         return rc;
1903 }
1904
1905 /*
1906  * slow path service functions
1907  */
1908
/* Handle newly asserted attention bits: mask them in the AEU, record them
 * in attn_state, service the hard-wired sources (NIG/link, SW timer,
 * GPIOs, general attentions) and finally acknowledge them to the HC.
 * NOTE: the NIG interrupt is masked for the duration of link handling and
 * restored only after the HC ack, under the PHY lock.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit should never be reported asserted while already recorded */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* AEU mask is shared with the MCP - modify it under the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * each is acknowledged by clearing its AEU register */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2004
/* Record a fan failure: replace the external PHY type in shmem with the
 * FAILURE marker (so later loads see the card as failed) and log an
 * error telling the user the card was shut down.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact OEM Support for assistance\n");
}
2024
/* Service deasserted attention group 0: SPIO5 (fan failure), GPIO3
 * (module detect) and any fatal HW block attention in set 0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* disable the SPIO5 attention so it cannot re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the offending bits, report and stop the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2065
/* Service deasserted attention group 1: doorbell queue (DORQ) errors and
 * fatal HW block attentions in set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the CLR register also clears the interrupt */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the offending bits, report and stop the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2096
/* Service deasserted attention group 2: CFC and PXP errors and fatal HW
 * block attentions in set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* reading the CLR register also clears the interrupt */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the offending bits, report and stop the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2136
/* Service deasserted attention group 3: general attentions (PMF/link
 * notification from the MCP, MC and MCP asserts) and latched attentions
 * (GRC timeout / reserved), which are cleared at the end.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the attention, then refresh MF config and
			 * the driver status word from shmem */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* take over PMF role if the MCP nominated us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the GRC attention registers exist on E1H only */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attentions */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
2191
/* The generic POR register doubles as driver-load bookkeeping: the low
 * LOAD_COUNTER_BITS hold a count of loaded driver instances, and the
 * bits above them are "reset in progress" flags.
 */
#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2198 /*
2199  * should be run under rtnl lock
2200  */
2201 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2202 {
2203         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2204         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2205         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2206         barrier();
2207         mmiowb();
2208 }
2209
2210 /*
2211  * should be run under rtnl lock
2212  */
2213 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2214 {
2215         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2216         val |= (1 << 16);
2217         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2218         barrier();
2219         mmiowb();
2220 }
2221
2222 /*
2223  * should be run under rtnl lock
2224  */
2225 bool bnx2x_reset_is_done(struct bnx2x *bp)
2226 {
2227         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2228         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2229         return (val & RESET_DONE_FLAG_MASK) ? false : true;
2230 }
2231
2232 /*
2233  * should be run under rtnl lock
2234  */
2235 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2236 {
2237         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2238
2239         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2240
2241         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2242         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2243         barrier();
2244         mmiowb();
2245 }
2246
2247 /*
2248  * should be run under rtnl lock
2249  */
2250 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2251 {
2252         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2253
2254         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2255
2256         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2257         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2258         barrier();
2259         mmiowb();
2260
2261         return val1;
2262 }
2263
2264 /*
2265  * should be run under rtnl lock
2266  */
2267 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2268 {
2269         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2270 }
2271
2272 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2273 {
2274         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2275         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2276 }
2277
/* Continue the current log line with a block name, prefixing ", " for
 * every entry after the first (idx != 0).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}
2284
2285 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2286 {
2287         int i = 0;
2288         u32 cur_bit = 0;
2289         for (i = 0; sig; i++) {
2290                 cur_bit = ((u32)0x1 << i);
2291                 if (sig & cur_bit) {
2292                         switch (cur_bit) {
2293                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2294                                 _print_next_block(par_num++, "BRB");
2295                                 break;
2296                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2297                                 _print_next_block(par_num++, "PARSER");
2298                                 break;
2299                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2300                                 _print_next_block(par_num++, "TSDM");
2301                                 break;
2302                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2303                                 _print_next_block(par_num++, "SEARCHER");
2304                                 break;
2305                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2306                                 _print_next_block(par_num++, "TSEMI");
2307                                 break;
2308                         }
2309
2310                         /* Clear the bit */
2311                         sig &= ~cur_bit;
2312                 }
2313         }
2314
2315         return par_num;
2316 }
2317
2318 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2319 {
2320         int i = 0;
2321         u32 cur_bit = 0;
2322         for (i = 0; sig; i++) {
2323                 cur_bit = ((u32)0x1 << i);
2324                 if (sig & cur_bit) {
2325                         switch (cur_bit) {
2326                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2327                                 _print_next_block(par_num++, "PBCLIENT");
2328                                 break;
2329                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2330                                 _print_next_block(par_num++, "QM");
2331                                 break;
2332                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2333                                 _print_next_block(par_num++, "XSDM");
2334                                 break;
2335                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2336                                 _print_next_block(par_num++, "XSEMI");
2337                                 break;
2338                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2339                                 _print_next_block(par_num++, "DOORBELLQ");
2340                                 break;
2341                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2342                                 _print_next_block(par_num++, "VAUX PCI CORE");
2343                                 break;
2344                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2345                                 _print_next_block(par_num++, "DEBUG");
2346                                 break;
2347                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2348                                 _print_next_block(par_num++, "USDM");
2349                                 break;
2350                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2351                                 _print_next_block(par_num++, "USEMI");
2352                                 break;
2353                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2354                                 _print_next_block(par_num++, "UPB");
2355                                 break;
2356                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2357                                 _print_next_block(par_num++, "CSDM");
2358                                 break;
2359                         }
2360
2361                         /* Clear the bit */
2362                         sig &= ~cur_bit;
2363                 }
2364         }
2365
2366         return par_num;
2367 }
2368
2369 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2370 {
2371         int i = 0;
2372         u32 cur_bit = 0;
2373         for (i = 0; sig; i++) {
2374                 cur_bit = ((u32)0x1 << i);
2375                 if (sig & cur_bit) {
2376                         switch (cur_bit) {
2377                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2378                                 _print_next_block(par_num++, "CSEMI");
2379                                 break;
2380                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2381                                 _print_next_block(par_num++, "PXP");
2382                                 break;
2383                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2384                                 _print_next_block(par_num++,
2385                                         "PXPPCICLOCKCLIENT");
2386                                 break;
2387                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2388                                 _print_next_block(par_num++, "CFC");
2389                                 break;
2390                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2391                                 _print_next_block(par_num++, "CDU");
2392                                 break;
2393                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2394                                 _print_next_block(par_num++, "IGU");
2395                                 break;
2396                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2397                                 _print_next_block(par_num++, "MISC");
2398                                 break;
2399                         }
2400
2401                         /* Clear the bit */
2402                         sig &= ~cur_bit;
2403                 }
2404         }
2405
2406         return par_num;
2407 }
2408
2409 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2410 {
2411         int i = 0;
2412         u32 cur_bit = 0;
2413         for (i = 0; sig; i++) {
2414                 cur_bit = ((u32)0x1 << i);
2415                 if (sig & cur_bit) {
2416                         switch (cur_bit) {
2417                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2418                                 _print_next_block(par_num++, "MCP ROM");
2419                                 break;
2420                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2421                                 _print_next_block(par_num++, "MCP UMP RX");
2422                                 break;
2423                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2424                                 _print_next_block(par_num++, "MCP UMP TX");
2425                                 break;
2426                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2427                                 _print_next_block(par_num++, "MCP SCPAD");
2428                                 break;
2429                         }
2430
2431                         /* Clear the bit */
2432                         sig &= ~cur_bit;
2433                 }
2434         }
2435
2436         return par_num;
2437 }
2438
2439 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2440                                      u32 sig2, u32 sig3)
2441 {
2442         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2443             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2444                 int par_num = 0;
2445                 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2446                         "[0]:0x%08x [1]:0x%08x "
2447                         "[2]:0x%08x [3]:0x%08x\n",
2448                           sig0 & HW_PRTY_ASSERT_SET_0,
2449                           sig1 & HW_PRTY_ASSERT_SET_1,
2450                           sig2 & HW_PRTY_ASSERT_SET_2,
2451                           sig3 & HW_PRTY_ASSERT_SET_3);
2452                 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2453                        bp->dev->name);
2454                 par_num = bnx2x_print_blocks_with_parity0(
2455                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2456                 par_num = bnx2x_print_blocks_with_parity1(
2457                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2458                 par_num = bnx2x_print_blocks_with_parity2(
2459                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2460                 par_num = bnx2x_print_blocks_with_parity3(
2461                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2462                 printk("\n");
2463                 return true;
2464         } else
2465                 return false;
2466 }
2467
2468 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2469 {
2470         struct attn_route attn;
2471         int port = BP_PORT(bp);
2472
2473         attn.sig[0] = REG_RD(bp,
2474                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2475                              port*4);
2476         attn.sig[1] = REG_RD(bp,
2477                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2478                              port*4);
2479         attn.sig[2] = REG_RD(bp,
2480                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2481                              port*4);
2482         attn.sig[3] = REG_RD(bp,
2483                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2484                              port*4);
2485
2486         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2487                                         attn.sig[3]);
2488 }
2489
/*
 * Handle the "deasserted" half of an attention interrupt: under the ALR
 * hardware lock, first check for HW block parity errors (which trigger
 * the recovery flow instead of normal handling), otherwise read the AEU
 * after-invert registers and dispatch each active dynamic attention
 * group, then clear the HC attention bits and re-enable the matching
 * AEU mask bits for this port.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
        struct attn_route attn, *group_mask;
        int port = BP_PORT(bp);
        int index;
        u32 reg_addr;
        u32 val;
        u32 aeu_mask;

        /* need to take HW lock because MCP or other port might also
           try to handle this event */
        bnx2x_acquire_alr(bp);

        if (bnx2x_chk_parity_attn(bp)) {
                /* Parity error: kick off the recovery worker and stop
                 * servicing interrupts on this function.
                 */
                bp->recovery_state = BNX2X_RECOVERY_INIT;
                bnx2x_set_reset_in_progress(bp);
                schedule_delayed_work(&bp->reset_task, 0);
                /* Disable HW interrupts */
                bnx2x_int_disable(bp);
                bnx2x_release_alr(bp);
                /* In case of parity errors don't handle attentions so that
                 * other function would "see" parity errors.
                 */
                return;
        }

        /* Snapshot the four attention signal words for this port */
        attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
        attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
        attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
        attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
        DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
           attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

        /* Dispatch each deasserted dynamic attention group, masking the
         * signal words with the group's configured routing mask.
         */
        for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
                if (deasserted & (1 << index)) {
                        group_mask = &bp->attn_group[index];

                        DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
                           index, group_mask->sig[0], group_mask->sig[1],
                           group_mask->sig[2], group_mask->sig[3]);

                        bnx2x_attn_int_deasserted3(bp,
                                        attn.sig[3] & group_mask->sig[3]);
                        bnx2x_attn_int_deasserted1(bp,
                                        attn.sig[1] & group_mask->sig[1]);
                        bnx2x_attn_int_deasserted2(bp,
                                        attn.sig[2] & group_mask->sig[2]);
                        bnx2x_attn_int_deasserted0(bp,
                                        attn.sig[0] & group_mask->sig[0]);
                }
        }

        bnx2x_release_alr(bp);

        /* Clear the handled attention bits in the HC */
        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

        val = ~deasserted;
        DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
           val, reg_addr);
        REG_WR(bp, reg_addr, val);

        /* Deasserted bits should already be tracked in attn_state */
        if (~bp->attn_state & deasserted)
                BNX2X_ERR("IGU ERROR\n");

        reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                          MISC_REG_AEU_MASK_ATTN_FUNC_0;

        /* Re-enable the AEU mask bits for this port under the HW lock */
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
        aeu_mask = REG_RD(bp, reg_addr);

        DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
           aeu_mask, deasserted);
        aeu_mask |= (deasserted & 0x3ff);
        DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

        REG_WR(bp, reg_addr, aeu_mask);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

        DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
        bp->attn_state &= ~deasserted;
        DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
2572
2573 static void bnx2x_attn_int(struct bnx2x *bp)
2574 {
2575         /* read local copy of bits */
2576         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2577                                                                 attn_bits);
2578         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2579                                                                 attn_bits_ack);
2580         u32 attn_state = bp->attn_state;
2581
2582         /* look for changed bits */
2583         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2584         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2585
2586         DP(NETIF_MSG_HW,
2587            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2588            attn_bits, attn_ack, asserted, deasserted);
2589
2590         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2591                 BNX2X_ERR("BAD attention state\n");
2592
2593         /* handle bits that were raised */
2594         if (asserted)
2595                 bnx2x_attn_int_asserted(bp, asserted);
2596
2597         if (deasserted)
2598                 bnx2x_attn_int_deasserted(bp, deasserted);
2599 }
2600
/*
 * Slow-path work handler: services HW attentions and CStorm statistics
 * events reported via the default status block, then acks all five
 * storm indices, re-enabling interrupts on the last ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
        struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
        u16 status;

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return;
        }

        /* status holds one bit per default-status-block index that changed */
        status = bnx2x_update_dsb_idx(bp);
/*      if (status == 0)                                     */
/*              BNX2X_ERR("spurious slowpath interrupt!\n"); */

        DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

        /* HW attentions */
        if (status & 0x1) {
                bnx2x_attn_int(bp);
                status &= ~0x1;
        }

        /* CStorm events: STAT_QUERY */
        if (status & 0x2) {
                DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
                status &= ~0x2;
        }

        if (unlikely(status))
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        /* Ack all storm indices; only the final ack re-enables the IGU
         * interrupt for this status block.
         */
        bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
                     IGU_INT_NOP, 1);
        bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
                     IGU_INT_NOP, 1);
        bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
                     IGU_INT_NOP, 1);
        bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
                     IGU_INT_NOP, 1);
        bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
                     IGU_INT_ENABLE, 1);
}
2645
/*
 * MSI-X slow-path interrupt handler: disables further slow-path
 * interrupts via the IGU ack, lets CNIC peek at the event (when built
 * in), and defers the real work to the sp_task workqueue item.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2x *bp = netdev_priv(dev);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        /* Mask the slow-path interrupt until sp_task re-enables it */
        bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
        {
                struct cnic_ops *c_ops;

                /* RCU-protected dispatch to the CNIC driver, if attached */
                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();
        }
#endif
        queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

        return IRQ_HANDLED;
}
2679
2680 /* end of slow path */
2681
2682 static void bnx2x_timer(unsigned long data)
2683 {
2684         struct bnx2x *bp = (struct bnx2x *) data;
2685
2686         if (!netif_running(bp->dev))
2687                 return;
2688
2689         if (atomic_read(&bp->intr_sem) != 0)
2690                 goto timer_restart;
2691
2692         if (poll) {
2693                 struct bnx2x_fastpath *fp = &bp->fp[0];
2694                 int rc;
2695
2696                 bnx2x_tx_int(fp);
2697                 rc = bnx2x_rx_int(fp, 1000);
2698         }
2699
2700         if (!BP_NOMCP(bp)) {
2701                 int func = BP_FUNC(bp);
2702                 u32 drv_pulse;
2703                 u32 mcp_pulse;
2704
2705                 ++bp->fw_drv_pulse_wr_seq;
2706                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2707                 /* TBD - add SYSTEM_TIME */
2708                 drv_pulse = bp->fw_drv_pulse_wr_seq;
2709                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2710
2711                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2712                              MCP_PULSE_SEQ_MASK);
2713                 /* The delta between driver pulse and mcp response
2714                  * should be 1 (before mcp response) or 0 (after mcp response)
2715                  */
2716                 if ((drv_pulse != mcp_pulse) &&
2717                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2718                         /* someone lost a heartbeat... */
2719                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2720                                   drv_pulse, mcp_pulse);
2721                 }
2722         }
2723
2724         if (bp->state == BNX2X_STATE_OPEN)
2725                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2726
2727 timer_restart:
2728         mod_timer(&bp->timer, jiffies + bp->current_interval);
2729 }
2730
2731 /* end of Statistics */
2732
2733 /* nic init */
2734
2735 /*
2736  * nic init service functions
2737  */
2738
/*
 * Zero the USTORM and CSTORM host status block areas in CSEM fast
 * memory for the given status block id on this port.
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
        int port = BP_PORT(bp);

        /* "CSTORM" */
        bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
                        CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
                        CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
        bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
                        CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
                        CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
2751
/*
 * Program a fastpath status block: write its USTORM and CSTORM section
 * DMA addresses and owning function into CSTORM internal memory,
 * disable host coalescing for every index, and ack/enable the block.
 *
 * @sb:      host status block to initialize (status_block_id is set here)
 * @mapping: DMA address of @sb
 * @sb_id:   status block index within the port
 */
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
                          dma_addr_t mapping, int sb_id)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int index;
        u64 section;

        /* USTORM */
        section = ((u64)mapping) + offsetof(struct host_status_block,
                                            u_status_block);
        sb->u_status_block.status_block_id = sb_id;

        /* Low/high halves of the section's DMA address */
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
                CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

        /* Start with host coalescing disabled on every USTORM index */
        for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

        /* CSTORM */
        section = ((u64)mapping) + offsetof(struct host_status_block,
                                            c_status_block);
        sb->c_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
                CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

        for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

        /* Ack the status block and enable its IGU interrupt */
        bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
2796
/*
 * Zero the per-function default status block areas of all four storms
 * (TSTORM, CSTORM U/C, XSTORM) in semaphore-block fast memory.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);

        bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
                        TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                        sizeof(struct tstorm_def_status_block)/4);
        bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
                        CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
                        sizeof(struct cstorm_def_status_block_u)/4);
        bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
                        CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
                        sizeof(struct cstorm_def_status_block_c)/4);
        bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
                        XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
                        sizeof(struct xstorm_def_status_block)/4);
}
2814
/*
 * Program the default (slow-path) status block: set up the attention
 * section and cache the AEU routing masks for all dynamic attention
 * groups, then program each storm's section address and function id,
 * disabling host coalescing on every index.  Finishes by acking the
 * block with interrupts enabled.
 *
 * @def_sb:  host default status block (section ids are set here)
 * @mapping: DMA address of @def_sb
 * @sb_id:   id assigned to every section of the default block
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
                              struct host_def_status_block *def_sb,
                              dma_addr_t mapping, int sb_id)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        int index, val, reg_offset;
        u64 section;

        /* ATTN */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            atten_status_block);
        def_sb->atten_status_block.status_block_id = sb_id;

        bp->attn_state = 0;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

        /* Cache the AEU routing mask (4 words, 0x10 apart per group) */
        for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
                bp->attn_group[index].sig[0] = REG_RD(bp,
                                                     reg_offset + 0x10*index);
                bp->attn_group[index].sig[1] = REG_RD(bp,
                                               reg_offset + 0x4 + 0x10*index);
                bp->attn_group[index].sig[2] = REG_RD(bp,
                                               reg_offset + 0x8 + 0x10*index);
                bp->attn_group[index].sig[3] = REG_RD(bp,
                                               reg_offset + 0xc + 0x10*index);
        }

        reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
                             HC_REG_ATTN_MSG0_ADDR_L);

        /* Tell the HC where to DMA attention messages */
        REG_WR(bp, reg_offset, U64_LO(section));
        REG_WR(bp, reg_offset + 4, U64_HI(section));

        reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

        val = REG_RD(bp, reg_offset);
        val |= sb_id;
        REG_WR(bp, reg_offset, val);

        /* USTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            u_def_status_block);
        def_sb->u_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
                CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

        for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

        /* CSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            c_def_status_block);
        def_sb->c_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_CSTRORM_INTMEM +
               CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_CSTRORM_INTMEM +
               ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
                CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

        for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

        /* TSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            t_def_status_block);
        def_sb->t_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_TSTRORM_INTMEM +
               TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_TSTRORM_INTMEM +
               ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
                TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_TSTRORM_INTMEM +
                         TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        /* XSTORM */
        section = ((u64)mapping) + offsetof(struct host_def_status_block,
                                            x_def_status_block);
        def_sb->x_def_status_block.status_block_id = sb_id;

        REG_WR(bp, BAR_XSTRORM_INTMEM +
               XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
        REG_WR(bp, BAR_XSTRORM_INTMEM +
               ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
               U64_HI(section));
        REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
                XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

        for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
                REG_WR16(bp, BAR_XSTRORM_INTMEM +
                         XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

        bp->stats_pending = 0;
        bp->set_mac_pending = 0;

        /* Ack the default status block and enable its IGU interrupt */
        bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
2930
/*
 * Program the interrupt coalescing timeouts for every queue from
 * bp->rx_ticks / bp->tx_ticks.  A timeout of zero ticks disables host
 * coalescing on that index (the HC_DISABLE write).
 */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int i;

        for_each_queue(bp, i) {
                int sb_id = bp->fp[i].sb_id;

                /* HC_INDEX_U_ETH_RX_CQ_CONS */
                REG_WR8(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
                                                      U_SB_ETH_RX_CQ_INDEX),
                        bp->rx_ticks/(4 * BNX2X_BTR));
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
                                                       U_SB_ETH_RX_CQ_INDEX),
                         (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

                /* HC_INDEX_C_ETH_TX_CQ_CONS */
                REG_WR8(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
                                                      C_SB_ETH_TX_CQ_INDEX),
                        bp->tx_ticks/(4 * BNX2X_BTR));
                REG_WR16(bp, BAR_CSTRORM_INTMEM +
                         CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
                                                       C_SB_ETH_TX_CQ_INDEX),
                         (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
        }
}
2960
/*
 * Initialize the slow-path (SPQ) ring: reset the producer state and
 * program the ring's DMA base address and producer index into XSTORM.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);

        spin_lock_init(&bp->spq_lock);

        bp->spq_left = MAX_SPQ_PENDING;
        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
        bp->spq_prod_bd = bp->spq;
        bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

        /* Low/high halves of the SPQ page base address */
        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
               U64_LO(bp->spq_mapping));
        REG_WR(bp,
               XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
               U64_HI(bp->spq_mapping));

        REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
               bp->spq_prod_idx);
}
2982
/*
 * Fill in the per-queue ETH connection contexts: the USTORM Rx side
 * (client id, buffer sizes, BD/SGE page bases, optional TPA settings)
 * and the CSTORM/XSTORM Tx side (status block index, Tx BD page base,
 * statistics id), plus the CDU reservation words.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
        int i;

        /* Rx */
        for_each_queue(bp, i) {
                struct eth_context *context = bnx2x_sp(bp, context[i].eth);
                struct bnx2x_fastpath *fp = &bp->fp[i];
                u8 cl_id = fp->cl_id;

                context->ustorm_st_context.common.sb_index_numbers =
                                                BNX2X_RX_SB_INDEX_NUM;
                context->ustorm_st_context.common.clientId = cl_id;
                context->ustorm_st_context.common.status_block_id = fp->sb_id;
                context->ustorm_st_context.common.flags =
                        (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
                context->ustorm_st_context.common.statistics_counter_id =
                                                cl_id;
                context->ustorm_st_context.common.mc_alignment_log_size =
                                                BNX2X_RX_ALIGN_SHIFT;
                context->ustorm_st_context.common.bd_buff_size =
                                                bp->rx_buf_size;
                context->ustorm_st_context.common.bd_page_base_hi =
                                                U64_HI(fp->rx_desc_mapping);
                context->ustorm_st_context.common.bd_page_base_lo =
                                                U64_LO(fp->rx_desc_mapping);
                if (!fp->disable_tpa) {
                        /* TPA enabled: also program the SGE ring */
                        context->ustorm_st_context.common.flags |=
                                USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
                        context->ustorm_st_context.common.sge_buff_size =
                                (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
                                           0xffff);
                        context->ustorm_st_context.common.sge_page_base_hi =
                                                U64_HI(fp->rx_sge_mapping);
                        context->ustorm_st_context.common.sge_page_base_lo =
                                                U64_LO(fp->rx_sge_mapping);

                        /* Max SGEs per packet from MTU, rounded up to a
                         * whole number of SGE pages.
                         */
                        context->ustorm_st_context.common.max_sges_for_packet =
                                SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
                        context->ustorm_st_context.common.max_sges_for_packet =
                                ((context->ustorm_st_context.common.
                                  max_sges_for_packet + PAGES_PER_SGE - 1) &
                                 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
                }

                context->ustorm_ag_context.cdu_usage =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_UCM_AG,
                                               ETH_CONNECTION_TYPE);

                context->xstorm_ag_context.cdu_reserved =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_XCM_AG,
                                               ETH_CONNECTION_TYPE);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_context *context =
                        bnx2x_sp(bp, context[i].eth);

                context->cstorm_st_context.sb_index_number =
                                                C_SB_ETH_TX_CQ_INDEX;
                context->cstorm_st_context.status_block_id = fp->sb_id;

                context->xstorm_st_context.tx_bd_page_base_hi =
                                                U64_HI(fp->tx_desc_mapping);
                context->xstorm_st_context.tx_bd_page_base_lo =
                                                U64_LO(fp->tx_desc_mapping);
                context->xstorm_st_context.statistics_data = (fp->cl_id |
                                XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
        }
}
3058
3059 static void bnx2x_init_ind_table(struct bnx2x *bp)
3060 {
3061         int func = BP_FUNC(bp);
3062         int i;
3063
3064         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3065                 return;
3066
3067         DP(NETIF_MSG_IFUP,
3068            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
3069         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3070                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3071                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3072                         bp->fp->cl_id + (i % bp->num_queues));
3073 }
3074
/*
 * Build a TSTORM client configuration (MTU, statistics, E1H outer-VLAN
 * removal and optional HW VLAN stripping) and write it into TSTORM
 * internal memory for every queue's client id.
 */
void bnx2x_set_client_config(struct bnx2x *bp)
{
        struct tstorm_eth_client_config tstorm_client = {0};
        int port = BP_PORT(bp);
        int i;

        tstorm_client.mtu = bp->dev->mtu;
        tstorm_client.config_flags =
                                (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
                                 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
        /* Enable HW VLAN stripping only when a VLAN group is registered
         * and Rx is active.
         */
        if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
                tstorm_client.config_flags |=
                                TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
                DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
        }
#endif

        /* Write the two 32-bit config words per client */
        for_each_queue(bp, i) {
                tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
                       ((u32 *)&tstorm_client)[0]);
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
                       ((u32 *)&tstorm_client)[1]);
        }

        DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
           ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
3107
/* Program the TSTORM MAC filtering configuration and the NIG LLH->BRB1
 * drive mask according to the requested Rx mode (bp->rx_mode).
 *
 * @mask (bp->rx_mode_cl_mask) selects which client(s) each drop/accept
 * bit applies to.  Fields left zero in tstorm_mac_filter fall back to
 * normal MAC/multicast matching.  For any mode other than NONE the
 * per-client config is (re)written afterwards.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* only broadcast is unconditionally accepted; uni/multi
		 * stay at their default (matched) behavior
		 */
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		/* leave the zeroed (drop-nothing-special) config in place */
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* port 1 uses the LLH1 mirror of the LLH0 register */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* copy the filter config word-by-word into TSTORM internal memory */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
3170
3171 static void bnx2x_init_internal_common(struct bnx2x *bp)
3172 {
3173         int i;
3174
3175         /* Zero this manually as its initialization is
3176            currently missing in the initTool */
3177         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3178                 REG_WR(bp, BAR_USTRORM_INTMEM +
3179                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
3180 }
3181
/* Per-port internal memory init: program the HC_BTR value (BNX2X_BTR)
 * for each storm's host-coalescing context.
 * NOTE(review): BTR presumably sets the HC timer/threshold resolution -
 * confirm against the HW interface docs.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
3193
/* Per-function internal memory init: RSS/TPA configuration, per-client
 * statistics reset, statistics collection context, function mode
 * (E1H MF), CQE ring mapping and TPA aggregation size, dropless flow
 * control thresholds, and the rate-shaping/fairness (cmng) contexts.
 * Must run after the port/common stages.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	/* build the TSTORM common config: RSS mode/mask, TPA, E1H OV */
	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	/* the config struct fits in one 32-bit word */
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* zero the per-client statistics areas of all four storms */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* tell X/T/U storms where the FW statistics query buffer lives
	 * (64-bit DMA address, written as LO then HI word)
	 */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	/* E1H only: publish single/multi function mode and outer VLAN */
	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* first CQE page (64-bit address, LO then HI) */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* low/high watermarks for BD, CQE and (TPA only) SGE rings
		 * NOTE(review): threshold units presumably ring entries -
		 * confirm against FW docs
		 */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}


	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
3406
/* Dispatch internal-memory init according to the load level granted by
 * the MCP: COMMON implies PORT implies FUNCTION, hence the intentional
 * switch fall-through.  An unknown code is logged and nothing is done.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
3427
/* Top-level NIC init: set up each fastpath queue's identity and status
 * block, then the default SB, rings, contexts and internal memory (per
 * @load_code), and finally enable interrupts.  Statement order matters:
 * interrupts are enabled only after everything else is in place and
 * flushed.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* with CNIC, SB 0..cl_id is shifted by one (CNIC owns one) */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 (fan failure attention may already be latched) */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
3482
3483 /* end of nic init */
3484
3485 /*
3486  * gzip service functions
3487  */
3488
/* Allocate the firmware decompression resources: a DMA-coherent output
 * buffer (bp->gunzip_buf), a zlib stream object and its inflate
 * workspace.  Returns 0 on success or -ENOMEM, unwinding any partial
 * allocation in reverse order via the goto chain.
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf  == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm  == NULL)
		goto gunzip_nomem2;

	/* workspace size is zlib-determined; used by zlib_inflateInit2 */
	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
	       " un-compression\n");
	return -ENOMEM;
}
3521
3522 static void bnx2x_gunzip_end(struct bnx2x *bp)
3523 {
3524         kfree(bp->strm->workspace);
3525
3526         kfree(bp->strm);
3527         bp->strm = NULL;
3528
3529         if (bp->gunzip_buf) {
3530                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3531                                   bp->gunzip_mapping);
3532                 bp->gunzip_buf = NULL;
3533         }
3534 }
3535
3536 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3537 {
3538         int n, rc;
3539
3540         /* check gzip header */
3541         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3542                 BNX2X_ERR("Bad gzip header\n");
3543                 return -EINVAL;
3544         }
3545
3546         n = 10;
3547
3548 #define FNAME                           0x8
3549
3550         if (zbuf[3] & FNAME)
3551                 while ((zbuf[n++] != 0) && (n < len));
3552
3553         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3554         bp->strm->avail_in = len - n;
3555         bp->strm->next_out = bp->gunzip_buf;
3556         bp->strm->avail_out = FW_BUF_SIZE;
3557
3558         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3559         if (rc != Z_OK)
3560                 return rc;
3561
3562         rc = zlib_inflate(bp->strm, Z_FINISH);
3563         if ((rc != Z_OK) && (rc != Z_STREAM_END))
3564                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3565                            bp->strm->msg);
3566
3567         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3568         if (bp->gunzip_outlen & 0x3)
3569                 netdev_err(bp->dev, "Firmware decompression error:"
3570                                     " gunzip_outlen (%d) not aligned\n",
3571                                 bp->gunzip_outlen);
3572         bp->gunzip_outlen >>= 2;
3573
3574         zlib_inflateEnd(bp->strm);
3575
3576         if (rc == Z_STREAM_END)
3577                 return 0;
3578
3579         return rc;
3580 }
3581
3582 /* nic load/unload */
3583
3584 /*
3585  * General service functions
3586  */
3587
3588 /* send a NIG loopback debug packet */
3589 static void bnx2x_lb_pckt(struct bnx2x *bp)
3590 {
3591         u32 wb_write[3];
3592
3593         /* Ethernet source and destination addresses */
3594         wb_write[0] = 0x55555555;
3595         wb_write[1] = 0x55555555;
3596         wb_write[2] = 0x20;             /* SOP */
3597         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3598
3599         /* NON-IP protocol */
3600         wb_write[0] = 0x09000000;
3601         wb_write[1] = 0x55555555;
3602         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
3603         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3604 }
3605
/* Internal memory self-test.
 *
 * Some of the internal memories are not directly readable from the
 * driver, so to test them we send debug loopback packets and observe
 * the NIG/BRB/PRS counters.  Part 1 sends a single packet with CFC
 * search credits at 0; part 2 sends ten, then restores one credit and
 * checks the parser drains.  Returns 0 on success or a negative stage
 * code (-1..-4) identifying which check timed out.  The timeout factor
 * is scaled up on FPGA/emulation platforms.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
3757
/* Unmask HW attention interrupts for the chip blocks (writing 0 to an
 * INT_MASK register enables all of that block's attention sources).
 * The commented-out SEM/MISC masks are deliberately left masked; PXP2
 * and PBF keep specific bits masked as noted.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* PXP2 keeps some bits masked; FPGA needs one extra bit masked */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
3796
/* Per-block parity attention masks, consumed by enable_blocks_parity():
 * a mask of 0 unmasks (enables) every parity source of the block, while
 * a non-zero value keeps the noted bits masked.
 */
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
3830
3831 static void enable_blocks_parity(struct bnx2x *bp)
3832 {
3833         int i, mask_arr_len =
3834                 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3835
3836         for (i = 0; i < mask_arr_len; i++)
3837                 REG_WR(bp, bnx2x_parity_mask[i].addr,
3838                         bnx2x_parity_mask[i].mask);
3839 }
3840
3841
/* Put the common chip blocks into reset by clearing bits in the two
 * MISC reset registers.
 * NOTE(review): the magic masks 0xd3ffff7f / 0x1403 select which block
 * reset bits are cleared - per-bit meanings come from the
 * MISC_REGISTERS_RESET_REG_1/2 layout; confirm against the register
 * definitions before changing.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
3849
/* Derive the PXP arbiter read/write ordering from the PCIe Device
 * Control register: write order from the Max_Payload_Size field
 * (bits 7:5), read order from Max_Read_Request_Size (bits 14:12)
 * unless overridden by the "mrrs" module parameter.
 */
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		/* -1 means "use the value the PCI core configured" */
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
3868
3869 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3870 {
3871         int is_required;
3872         u32 val;
3873         int port;
3874
3875         if (BP_NOMCP(bp))
3876                 return;
3877
3878         is_required = 0;
3879         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3880               SHARED_HW_CFG_FAN_FAILURE_MASK;
3881
3882         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3883                 is_required = 1;
3884
3885         /*
3886          * The fan failure mechanism is usually related to the PHY type since
3887          * the power consumption of the board is affected by the PHY. Currently,
3888          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3889          */
3890         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3891                 for (port = PORT_0; port < PORT_MAX; port++) {
3892                         is_required |=
3893                                 bnx2x_fan_failure_det_req(
3894                                         bp,
3895                                         bp->common.shmem_base,
3896                                         bp->common.shmem2_base,
3897                                         port);
3898                 }
3899
3900         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3901
3902         if (is_required == 0)
3903                 return;
3904
3905         /* Fan failure is indicated by SPIO 5 */
3906         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3907                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
3908
3909         /* set to active low mode */
3910         val = REG_RD(bp, MISC_REG_SPIO_INT);
3911         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3912                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3913         REG_WR(bp, MISC_REG_SPIO_INT, val);
3914
3915         /* enable interrupt to signal the IGU */
3916         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3917         val |= (1 << MISC_REGISTERS_SPIO_5);
3918         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3919 }
3920
/*
 * bnx2x_init_common - one-time, whole-chip hardware initialization.
 *
 * Executed only by the driver instance that received
 * FW_MSG_CODE_DRV_LOAD_COMMON from the MCP, i.e. the first function to come
 * up on this chip.  Brings the shared HW blocks out of reset and initializes
 * them in pipeline order (MISC -> PXP/PXP2 -> DMAE -> CMs -> QM -> DQ ->
 * BRB/PRS -> SDMs/SEMs -> ... -> NIG), then runs completion polls and an
 * optional internal memory self test.
 *
 * NOTE: the statement order below follows the HW init procedure and must
 * not be rearranged.
 *
 * Returns 0 on success, or -EBUSY when a block fails to signal init
 * completion (PXP2 CFG/RD_INIT, CFC LL/AC/CAM init) or the memory self
 * test fails.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];	/* zeroed 64-bit pattern for the QM page table */
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	/* take the chip through a clean reset, then release all blocks */
	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		/* tell the HW whether we run in multi-function mode */
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register; NOTE(review): the 0x100 value and
	 * 30ms settle time come from the HW init procedure - confirm against
	 * chip documentation before changing */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* on big-endian hosts the PXP2 read/write clients must byte-swap */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	/* per-client page sizes; TM/QM/SRC are only used with CNIC */
	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	/* from here on DMAE transfers may be used */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	/* dummy reads of the SEM passive buffers - NOTE(review): presumably
	 * required by the init procedure to flush/validate the path */
	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	/* program the QM queue base addresses and zero its pointer table */
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero each storm's internal memory */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	/* seed the searcher RSS keys with random values while it is held
	 * in soft reset */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	/* fixed searcher hash keys used by the CNIC path */
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	/* the CDU programming below (and the ILT layout) assumes a 1024-byte
	 * connection context; warn loudly if the struct drifted */
	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	/* NOTE(review): (4 << 24) + (0 << 12) + 1024 presumably encodes CDU
	 * global parameters with the 1024-byte context size checked above -
	 * confirm field layout against CDU register documentation */
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	/* only on E1, and only on the first up after power-on (val == 0) */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	/* common PHY init needs the bootcode; without it the link cannot
	 * be brought up but chip init itself still succeeds */
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base,
				      bp->common.shmem2_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
4187
/*
 * bnx2x_init_port - per-port hardware initialization.
 *
 * Runs for load codes COMMON and PORT (see bnx2x_init_hw()).  Initializes
 * every HW block at this port's init stage, programs the BRB pause
 * thresholds according to MTU and multi-function mode, configures PBF
 * credits for a 9000-byte MTU without pause, sets the per-port NIG and AEU
 * masks (including fan-failure attention on SPIO 5 where required), and
 * finally resets the link.
 *
 * NOTE: the block init order follows the HW init procedure and must not be
 * rearranged.  Always returns 0.
 */
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;		/* BRB pause thresholds */
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	/* mask this port's NIG interrupt while its blocks are initialized */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	/* NOTE(review): thresholds appear to be in 256-byte BRB block units
	 * (see the /256 in the inline comments) - confirm against BRB docs */
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	/* pulse PBF init so the new credit/threshold values take effect */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* on E1 the HC edges are cleared per port (E1H does this
		 * per function in bnx2x_init_func()) */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* disable link-level flow control, use plain pause frames */
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	/* unmask the SPIO 5 attention bit when the board design requires
	 * fan-failure detection (see bnx2x_setup_fan_failure_detection()) */
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
4330
/* ILT (Internal Lookup Table) layout: 768 lines split between the two
 * functions; each function's lines start at FUNC_ILT_BASE(func). */
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
/* All macro parameters below are parenthesized in the expansion so that
 * expression arguments (e.g. FUNC_ILT_BASE(a + b)) evaluate correctly. */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
/* single-line ILT range: first == last == x */
#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
/* ILT range register value: last line in bits 10+, first line in bits 0-9 */
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
4349
4350 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4351 {
4352         int reg;
4353
4354         if (CHIP_IS_E1H(bp))
4355                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4356         else /* E1 */
4357                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4358
4359         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4360 }
4361
/*
 * bnx2x_init_func - per-function hardware initialization.
 *
 * Runs for every load code (see bnx2x_init_hw()).  Enables MSI attention
 * for this port's HC, programs this function's ILT lines (context, and with
 * CNIC also timers/QM/searcher tables), runs the function-stage block init
 * on E1H (including the LLH enable and outer-VLAN id for multi-function
 * mode), clears the HC edge registers, and probes the PHY.
 *
 * Always returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;			/* current ILT line */

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	/* map the slowpath context area through this function's first ILT
	 * line; E1H programs a first/last register pair, E1 packs the range
	 * into a single per-function register */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* map the CNIC timers, QM and searcher T1 tables on the lines
	 * following the CDU range */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	/* last free entry: 64 bytes before the end of the 16KB T2 table */
	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* function-stage block init exists only on E1H */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);
	bnx2x_phy_probe(&bp->link_params);
	return 0;
}
4458
/*
 * bnx2x_init_hw - top-level hardware init dispatcher.
 * @bp:		driver instance
 * @load_code:	FW_MSG_CODE_DRV_LOAD_* answer from the MCP, telling this
 *		instance how much of the chip it is responsible for
 *
 * The switch below falls through INTENTIONALLY: COMMON init implies PORT
 * init, which implies FUNCTION init - an instance that initializes the
 * whole chip also initializes its own port and function.
 *
 * Also sets up the gunzip buffer used by the block init code, records the
 * current MCP driver-pulse sequence, and zeroes the (default and per-queue)
 * status blocks before the gunzip buffer is released.
 *
 * Returns 0 on success or a negative errno from the init stages.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE may not be used until common/port init enables it */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* sync our pulse counter with the bootcode's current value */
		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	/* the CNIC status block follows the last L2 queue's (i is left at
	 * one past the last queue by the loop above) */
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
4520
4521 void bnx2x_free_mem(struct bnx2x *bp)
4522 {
4523
4524 #define BNX2X_PCI_FREE(x, y, size) \
4525         do { \
4526                 if (x) { \
4527                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
4528                         x = NULL; \
4529                         y = 0; \
4530                 } \
4531         } while (0)
4532
4533 #define BNX2X_FREE(x) \
4534         do { \
4535                 if (x) { \
4536                         vfree(x); \
4537                         x = NULL; \
4538                 } \
4539         } while (0)
4540
4541         int i;
4542
4543         /* fastpath */
4544         /* Common */
4545         for_each_queue(bp, i) {
4546
4547                 /* status blocks */
4548                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4549                                bnx2x_fp(bp, i, status_blk_mapping),
4550                                sizeof(struct host_status_block));
4551         }
4552         /* Rx */
4553         for_each_queue(bp, i) {
4554
4555                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4556                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4557                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4558                                bnx2x_fp(bp, i, rx_desc_mapping),
4559                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4560
4561                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4562                                bnx2x_fp(bp, i, rx_comp_mapping),
4563                                sizeof(struct eth_fast_path_rx_cqe) *
4564                                NUM_RCQ_BD);
4565
4566                 /* SGE ring */
4567                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4568                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4569                                bnx2x_fp(bp, i, rx_sge_mapping),
4570                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4571         }
4572         /* Tx */
4573         for_each_queue(bp, i) {
4574
4575                 /* fastpath tx rings: tx_buf tx_desc */
4576                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4577                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4578                                bnx2x_fp(bp, i, tx_desc_mapping),
4579                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4580         }
4581         /* end of fastpath */
4582
4583         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4584                        sizeof(struct host_def_status_block));
4585
4586         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4587                        sizeof(struct bnx2x_slowpath));
4588
4589 #ifdef BCM_CNIC
4590         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4591         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4592         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4593         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4594         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4595                        sizeof(struct host_status_block));
4596 #endif
4597         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4598
4599 #undef BNX2X_PCI_FREE
4600 #undef BNX2X_KFREE
4601 }
4602
/*
 * bnx2x_alloc_mem - allocate all host memory the driver needs.
 *
 * Allocates, zeroed, the per-queue fastpath status blocks and Rx/Tx rings
 * (DMA-coherent for HW-visible rings, vmalloc for host-only shadow arrays),
 * the default status block and slowpath areas, the CNIC tables (when built
 * in) and the slowpath queue page.
 *
 * On any allocation failure, everything allocated so far is released via
 * bnx2x_free_mem() (goto-cleanup through the macros' alloc_mem_err label)
 * and -ENOMEM is returned.  Returns 0 on success.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* Allocate a zeroed DMA-coherent area; jump to cleanup on failure. */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* Allocate a zeroed vmalloc() area; jump to cleanup on failure. */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* back-pointer from the fastpath struct to the driver */
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* chain the 64-byte T2 entries: the u64 at offset 56 of each entry
	 * holds the DMA address of the next one */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	/* release everything allocated before the failure */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
4708
4709
4710 /*
4711  * Init service functions
4712  */
4713
4714 /**
4715  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4716  *
4717  * @param bp driver descriptor
4718  * @param set set or clear an entry (1 or 0)
4719  * @param mac pointer to a buffer containing a MAC
4720  * @param cl_bit_vec bit vector of clients to register a MAC for
4721  * @param cam_offset offset in a CAM to use
4722  * @param with_bcast set broadcast MAC as well
4723  */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	/* one entry for the MAC itself, one more for broadcast if asked */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;	/* not bound to a single client */
	config->hdr.reserved1 = 0;

	/* primary MAC: written as three byte-swapped 16-bit words, read
	 * from the byte array via u16 loads (existing driver convention
	 * for CAM entries) */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		/* clearing: mark the CAM entry invalid instead */
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast entry (ff:ff:ff:ff:ff:ff), same client bit vector */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
							cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	/* post the SET_MAC ramrod; completion is awaited by the caller
	 * (via set_mac_pending / bnx2x_wait_ramrod) */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
4785
4786 /**
4787  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4788  *
4789  * @param bp driver descriptor
4790  * @param set set or clear an entry (1 or 0)
4791  * @param mac pointer to a buffer containing a MAC
4792  * @param cl_bit_vec bit vector of clients to register a MAC for
4793  * @param cam_offset offset in a CAM to use
4794  */
4795 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4796                                        u32 cl_bit_vec, u8 cam_offset)
4797 {
4798         struct mac_configuration_cmd_e1h *config =
4799                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4800
4801         config->hdr.length = 1;
4802         config->hdr.offset = cam_offset;
4803         config->hdr.client_id = 0xff;
4804         config->hdr.reserved1 = 0;
4805
4806         /* primary MAC */
4807         config->config_table[0].msb_mac_addr =
4808                                         swab16(*(u16 *)&mac[0]);
4809         config->config_table[0].middle_mac_addr =
4810                                         swab16(*(u16 *)&mac[2]);
4811         config->config_table[0].lsb_mac_addr =
4812                                         swab16(*(u16 *)&mac[4]);
4813         config->config_table[0].clients_bit_vector =
4814                                         cpu_to_le32(cl_bit_vec);
4815         config->config_table[0].vlan_id = 0;
4816         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4817         if (set)
4818                 config->config_table[0].flags = BP_PORT(bp);
4819         else
4820                 config->config_table[0].flags =
4821                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4822
4823         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4824            (set ? "setting" : "clearing"),
4825            config->config_table[0].msb_mac_addr,
4826            config->config_table[0].middle_mac_addr,
4827            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4828
4829         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4830                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4831                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4832 }
4833
4834 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4835                              int *state_p, int poll)
4836 {
4837         /* can take a while if any port is running */
4838         int cnt = 5000;
4839
4840         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4841            poll ? "polling" : "waiting", state, idx);
4842
4843         might_sleep();
4844         while (cnt--) {
4845                 if (poll) {
4846                         bnx2x_rx_int(bp->fp, 10);
4847                         /* if index is different from 0
4848                          * the reply for some commands will
4849                          * be on the non default queue
4850                          */
4851                         if (idx)
4852                                 bnx2x_rx_int(&bp->fp[idx], 10);
4853                 }
4854
4855                 mb(); /* state is changed by bnx2x_sp_event() */
4856                 if (*state_p == state) {
4857 #ifdef BNX2X_STOP_ON_ERROR
4858                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4859 #endif
4860                         return 0;
4861                 }
4862
4863                 msleep(1);
4864
4865                 if (bp->panic)
4866                         return -EIO;
4867         }
4868
4869         /* timeout! */
4870         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4871                   poll ? "polling" : "waiting", state, idx);
4872 #ifdef BNX2X_STOP_ON_ERROR
4873         bnx2x_panic();
4874 #endif
4875
4876         return -EBUSY;
4877 }
4878
4879 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4880 {
4881         bp->set_mac_pending++;
4882         smp_wmb();
4883
4884         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4885                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
4886
4887         /* Wait for a completion */
4888         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4889 }
4890
4891 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4892 {
4893         bp->set_mac_pending++;
4894         smp_wmb();
4895
4896         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4897                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4898                                   1);
4899
4900         /* Wait for a completion */
4901         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4902 }
4903
4904 #ifdef BCM_CNIC
/**
 * Sets the iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if the ramrod doesn't return.
 */
4915 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4916 {
4917         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4918
4919         bp->set_mac_pending++;
4920         smp_wmb();
4921
4922         /* Send a SET_MAC ramrod */
4923         if (CHIP_IS_E1(bp))
4924                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4925                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4926                                   1);
4927         else
4928                 /* CAM allocation for E1H
4929                 * unicasts: by func number
4930                 * multicast: 20+FUNC*20, 20 each
4931                 */
4932                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4933                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4934
4935         /* Wait for a completion when setting */
4936         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4937
4938         return 0;
4939 }
4940 #endif
4941
4942 int bnx2x_setup_leading(struct bnx2x *bp)
4943 {
4944         int rc;
4945
4946         /* reset IGU state */
4947         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4948
4949         /* SETUP ramrod */
4950         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4951
4952         /* Wait for completion */
4953         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4954
4955         return rc;
4956 }
4957
4958 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4959 {
4960         struct bnx2x_fastpath *fp = &bp->fp[index];
4961
4962         /* reset IGU state */
4963         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4964
4965         /* SETUP ramrod */
4966         fp->state = BNX2X_FP_STATE_OPENING;
4967         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4968                       fp->cl_id, 0);
4969
4970         /* Wait for completion */
4971         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4972                                  &(fp->state), 0);
4973 }
4974
4975
4976 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
4977 {
4978
4979         switch (bp->multi_mode) {
4980         case ETH_RSS_MODE_DISABLED:
4981                 bp->num_queues = 1;
4982                 break;
4983
4984         case ETH_RSS_MODE_REGULAR:
4985                 if (num_queues)
4986                         bp->num_queues = min_t(u32, num_queues,
4987                                                   BNX2X_MAX_QUEUES(bp));
4988                 else
4989                         bp->num_queues = min_t(u32, num_online_cpus(),
4990                                                   BNX2X_MAX_QUEUES(bp));
4991                 break;
4992
4993
4994         default:
4995                 bp->num_queues = 1;
4996                 break;
4997         }
4998 }
4999
5000
5001
5002 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5003 {
5004         struct bnx2x_fastpath *fp = &bp->fp[index];
5005         int rc;
5006
5007         /* halt the connection */
5008         fp->state = BNX2X_FP_STATE_HALTING;
5009         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5010
5011         /* Wait for completion */
5012         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5013                                &(fp->state), 1);
5014         if (rc) /* timeout */
5015                 return rc;
5016
5017         /* delete cfc entry */
5018         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5019
5020         /* Wait for completion */
5021         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5022                                &(fp->state), 1);
5023         return rc;
5024 }
5025
/* Halt the leading connection and delete the port. Unlike the other
 * ramrods here, PORT_DEL completion is detected as a change of the
 * default status block's slowpath producer rather than a state field. */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so a later change signals completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			/* ~500ms elapsed without a producer update */
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	/* move on to unload even on timeout - the chip gets reset anyway */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
5075
/* Per-function reset: zero the HC leading/trailing edge registers,
 * stop the CNIC timer scan (when built in) and clear this function's
 * ILT entries. */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		/* scan-on register reading 0 means the scan finished */
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
5104
/* Per-port reset: mask NIG interrupts and AEU attentions, stop packet
 * flow into the BRB, then verify the BRB has drained for this port. */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		/* non-fatal: only log; the chip is about to be reset */
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
5130
5131 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5132 {
5133         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5134            BP_FUNC(bp), reset_code);
5135
5136         switch (reset_code) {
5137         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5138                 bnx2x_reset_port(bp);
5139                 bnx2x_reset_func(bp);
5140                 bnx2x_reset_common(bp);
5141                 break;
5142
5143         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5144                 bnx2x_reset_port(bp);
5145                 bnx2x_reset_func(bp);
5146                 break;
5147
5148         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5149                 bnx2x_reset_func(bp);
5150                 break;
5151
5152         default:
5153                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5154                 break;
5155         }
5156 }
5157
5158 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5159 {
5160         int port = BP_PORT(bp);
5161         u32 reset_code = 0;
5162         int i, cnt, rc;
5163
5164         /* Wait until tx fastpath tasks complete */
5165         for_each_queue(bp, i) {
5166                 struct bnx2x_fastpath *fp = &bp->fp[i];
5167
5168                 cnt = 1000;
5169                 while (bnx2x_has_tx_work_unload(fp)) {
5170
5171                         bnx2x_tx_int(fp);
5172                         if (!cnt) {
5173                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
5174                                           i);
5175 #ifdef BNX2X_STOP_ON_ERROR
5176                                 bnx2x_panic();
5177                                 return -EBUSY;
5178 #else
5179                                 break;
5180 #endif
5181                         }
5182                         cnt--;
5183                         msleep(1);
5184                 }
5185         }
5186         /* Give HW time to discard old tx messages */
5187         msleep(1);
5188
5189         if (CHIP_IS_E1(bp)) {
5190                 struct mac_configuration_cmd *config =
5191                                                 bnx2x_sp(bp, mcast_config);
5192
5193                 bnx2x_set_eth_mac_addr_e1(bp, 0);
5194
5195                 for (i = 0; i < config->hdr.length; i++)
5196                         CAM_INVALIDATE(config->config_table[i]);
5197
5198                 config->hdr.length = i;
5199                 if (CHIP_REV_IS_SLOW(bp))
5200                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5201                 else
5202                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5203                 config->hdr.client_id = bp->fp->cl_id;
5204                 config->hdr.reserved1 = 0;
5205
5206                 bp->set_mac_pending++;
5207                 smp_wmb();
5208
5209                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5210                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5211                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5212
5213         } else { /* E1H */
5214                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5215
5216                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5217
5218                 for (i = 0; i < MC_HASH_SIZE; i++)
5219                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5220
5221                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5222         }
5223 #ifdef BCM_CNIC
5224         /* Clear iSCSI L2 MAC */
5225         mutex_lock(&bp->cnic_mutex);
5226         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5227                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5228                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5229         }
5230         mutex_unlock(&bp->cnic_mutex);
5231 #endif
5232
5233         if (unload_mode == UNLOAD_NORMAL)
5234                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5235
5236         else if (bp->flags & NO_WOL_FLAG)
5237                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5238
5239         else if (bp->wol) {
5240                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5241                 u8 *mac_addr = bp->dev->dev_addr;
5242                 u32 val;
5243                 /* The mac address is written to entries 1-4 to
5244                    preserve entry 0 which is used by the PMF */
5245                 u8 entry = (BP_E1HVN(bp) + 1)*8;
5246
5247                 val = (mac_addr[0] << 8) | mac_addr[1];
5248                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5249
5250                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5251                       (mac_addr[4] << 8) | mac_addr[5];
5252                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5253
5254                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5255
5256         } else
5257                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5258
5259         /* Close multi and leading connections
5260            Completions for ramrods are collected in a synchronous way */
5261         for_each_nondefault_queue(bp, i)
5262                 if (bnx2x_stop_multi(bp, i))
5263                         goto unload_error;
5264
5265         rc = bnx2x_stop_leading(bp);
5266         if (rc) {
5267                 BNX2X_ERR("Stop leading failed!\n");
5268 #ifdef BNX2X_STOP_ON_ERROR
5269                 return -EBUSY;
5270 #else
5271                 goto unload_error;
5272 #endif
5273         }
5274
5275 unload_error:
5276         if (!BP_NOMCP(bp))
5277                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5278         else {
5279                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5280                    load_count[0], load_count[1], load_count[2]);
5281                 load_count[0]--;
5282                 load_count[1 + port]--;
5283                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5284                    load_count[0], load_count[1], load_count[2]);
5285                 if (load_count[0] == 0)
5286                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5287                 else if (load_count[1 + port] == 0)
5288                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5289                 else
5290                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5291         }
5292
5293         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5294             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5295                 bnx2x__link_reset(bp);
5296
5297         /* Reset the chip */
5298         bnx2x_reset_chip(bp, reset_code);
5299
5300         /* Report UNLOAD_DONE to MCP */
5301         if (!BP_NOMCP(bp))
5302                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5303
5304 }
5305
5306 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5307 {
5308         u32 val;
5309
5310         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5311
5312         if (CHIP_IS_E1(bp)) {
5313                 int port = BP_PORT(bp);
5314                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5315                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
5316
5317                 val = REG_RD(bp, addr);
5318                 val &= ~(0x300);
5319                 REG_WR(bp, addr, val);
5320         } else if (CHIP_IS_E1H(bp)) {
5321                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5322                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5323                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5324                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5325         }
5326 }
5327
5328
5329 /* Close gates #2, #3 and #4: */
5330 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5331 {
5332         u32 val, addr;
5333
5334         /* Gates #2 and #4a are closed/opened for "not E1" only */
5335         if (!CHIP_IS_E1(bp)) {
5336                 /* #4 */
5337                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5338                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5339                        close ? (val | 0x1) : (val & (~(u32)1)));
5340                 /* #2 */
5341                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5342                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5343                        close ? (val | 0x1) : (val & (~(u32)1)));
5344         }
5345
5346         /* #3 */
5347         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5348         val = REG_RD(bp, addr);
5349         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5350
5351         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5352                 close ? "closing" : "opening");
5353         mmiowb();
5354 }
5355
5356 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
5357
5358 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5359 {
5360         /* Do some magic... */
5361         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5362         *magic_val = val & SHARED_MF_CLP_MAGIC;
5363         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5364 }
5365
/* Restore the value of the `magic' bit.
 *
 * @param bp driver handle
 * @param magic_val Old value of the `magic' bit.
 */
5371 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5372 {
5373         /* Restore the `magic' bit value... */
5374         /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5375         SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5376                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5377         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5378         MF_CFG_WR(bp, shared_mf_config.clp_mb,
5379                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5380 }
5381
5382 /* Prepares for MCP reset: takes care of CLP configurations.
5383  *
5384  * @param bp
5385  * @param magic_val Old value of 'magic' bit.
5386  */
5387 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5388 {
5389         u32 shmem;
5390         u32 validity_offset;
5391
5392         DP(NETIF_MSG_HW, "Starting\n");
5393
5394         /* Set `magic' bit in order to save MF config */
5395         if (!CHIP_IS_E1(bp))
5396                 bnx2x_clp_reset_prep(bp, magic_val);
5397
5398         /* Get shmem offset */
5399         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5400         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5401
5402         /* Clear validity map flags */
5403         if (shmem > 0)
5404                 REG_WR(bp, shmem + validity_offset, 0);
5405 }
5406
5407 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5408 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
5409
5410 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5411  * depending on the HW type.
5412  *
5413  * @param bp
5414  */
5415 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5416 {
5417         /* special handling for emulation and FPGA,
5418            wait 10 times longer */
5419         if (CHIP_REV_IS_SLOW(bp))
5420                 msleep(MCP_ONE_TIMEOUT*10);
5421         else
5422                 msleep(MCP_ONE_TIMEOUT);
5423 }
5424
/* Wait (up to MCP_TIMEOUT ms) for the MCP to come back after a reset,
 * using the shmem validity map bits as the "MCP is alive" indication,
 * then restore the CLP `magic' bit saved by bnx2x_reset_mcp_prep().
 * Returns 0 on success, -ENOTTY if shmem is missing or never becomes
 * valid. */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* give the MCP a moment before the first poll */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
5476
5477 static void bnx2x_pxp_prep(struct bnx2x *bp)
5478 {
5479         if (!CHIP_IS_E1(bp)) {
5480                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5481                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5482                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5483                 mmiowb();
5484         }
5485 }
5486
5487 /*
5488  * Reset the whole chip except for:
5489  *      - PCIE core
5490  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5491  *              one reset bit)
5492  *      - IGU
5493  *      - MISC (including AEU)
5494  *      - GRC
5495  *      - RBCN, RBCP
5496  */
5497 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5498 {
5499         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5500
5501         not_reset_mask1 =
5502                 MISC_REGISTERS_RESET_REG_1_RST_HC |
5503                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5504                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5505
5506         not_reset_mask2 =
5507                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5508                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5509                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5510                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5511                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5512                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5513                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5514                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5515
5516         reset_mask1 = 0xffffffff;
5517
5518         if (CHIP_IS_E1(bp))
5519                 reset_mask2 = 0xffff;
5520         else
5521                 reset_mask2 = 0x1ffff;
5522
5523         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5524                reset_mask1 & (~not_reset_mask1));
5525         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5526                reset_mask2 & (~not_reset_mask2));
5527
5528         barrier();
5529         mmiowb();
5530
5531         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5532         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5533         mmiowb();
5534 }
5535
5536 static int bnx2x_process_kill(struct bnx2x *bp)
5537 {
5538         int cnt = 1000;
5539         u32 val = 0;
5540         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5541
5542
5543         /* Empty the Tetris buffer, wait for 1s */
5544         do {
5545                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5546                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5547                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5548                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5549                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5550                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5551                     ((port_is_idle_0 & 0x1) == 0x1) &&
5552                     ((port_is_idle_1 & 0x1) == 0x1) &&
5553                     (pgl_exp_rom2 == 0xffffffff))
5554                         break;
5555                 msleep(1);
5556         } while (cnt-- > 0);
5557
5558         if (cnt <= 0) {
5559                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5560                           " are still"
5561                           " outstanding read requests after 1s!\n");
5562                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5563                           " port_is_idle_0=0x%08x,"
5564                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5565                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5566                           pgl_exp_rom2);
5567                 return -EAGAIN;
5568         }
5569
5570         barrier();
5571
5572         /* Close gates #2, #3 and #4 */
5573         bnx2x_set_234_gates(bp, true);
5574
5575         /* TBD: Indicate that "process kill" is in progress to MCP */
5576
5577         /* Clear "unprepared" bit */
5578         REG_WR(bp, MISC_REG_UNPREPARED, 0);
5579         barrier();
5580
5581         /* Make sure all is written to the chip before the reset */
5582         mmiowb();
5583
5584         /* Wait for 1ms to empty GLUE and PCI-E core queues,
5585          * PSWHST, GRC and PSWRD Tetris buffer.
5586          */
5587         msleep(1);
5588
5589         /* Prepare to chip reset: */
5590         /* MCP */
5591         bnx2x_reset_mcp_prep(bp, &val);
5592
5593         /* PXP */
5594         bnx2x_pxp_prep(bp);
5595         barrier();
5596
5597         /* reset the chip */
5598         bnx2x_process_kill_chip_reset(bp);
5599         barrier();
5600
5601         /* Recover after reset: */
5602         /* MCP */
5603         if (bnx2x_reset_mcp_comp(bp, val))
5604                 return -EAGAIN;
5605
5606         /* PXP */
5607         bnx2x_pxp_prep(bp);
5608
5609         /* Open the gates #2, #3 and #4 */
5610         bnx2x_set_234_gates(bp, false);
5611
5612         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5613          * reset state, re-enable attentions. */
5614
5615         return 0;
5616 }
5617
5618 static int bnx2x_leader_reset(struct bnx2x *bp)
5619 {
5620         int rc = 0;
5621         /* Try to recover after the failure */
5622         if (bnx2x_process_kill(bp)) {
5623                 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
5624                        bp->dev->name);
5625                 rc = -EAGAIN;
5626                 goto exit_leader_reset;
5627         }
5628
5629         /* Clear "reset is in progress" bit and update the driver state */
5630         bnx2x_set_reset_done(bp);
5631         bp->recovery_state = BNX2X_RECOVERY_DONE;
5632
5633 exit_leader_reset:
5634         bp->is_leader = 0;
5635         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5636         smp_wmb();
5637         return rc;
5638 }
5639
5640 /* Assumption: runs under rtnl lock. This together with the fact
5641  * that it's called only from bnx2x_reset_task() ensure that it
5642  * will never be called when netif_running(bp->dev) is false.
5643  */
5644 static void bnx2x_parity_recover(struct bnx2x *bp)
5645 {
5646         DP(NETIF_MSG_HW, "Handling parity\n");
5647         while (1) {
5648                 switch (bp->recovery_state) {
5649                 case BNX2X_RECOVERY_INIT:
5650                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5651                         /* Try to get a LEADER_LOCK HW lock */
5652                         if (bnx2x_trylock_hw_lock(bp,
5653                                 HW_LOCK_RESOURCE_RESERVED_08))
5654                                 bp->is_leader = 1;
5655
5656                         /* Stop the driver */
5657                         /* If interface has been removed - break */
5658                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5659                                 return;
5660
5661                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
5662                         /* Ensure "is_leader" and "recovery_state"
5663                          *  update values are seen on other CPUs
5664                          */
5665                         smp_wmb();
5666                         break;
5667
5668                 case BNX2X_RECOVERY_WAIT:
5669                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5670                         if (bp->is_leader) {
5671                                 u32 load_counter = bnx2x_get_load_cnt(bp);
5672                                 if (load_counter) {
5673                                         /* Wait until all other functions get
5674                                          * down.
5675                                          */
5676                                         schedule_delayed_work(&bp->reset_task,
5677                                                                 HZ/10);
5678                                         return;
5679                                 } else {
5680                                         /* If all other functions got down -
5681                                          * try to bring the chip back to
5682                                          * normal. In any case it's an exit
5683                                          * point for a leader.
5684                                          */
5685                                         if (bnx2x_leader_reset(bp) ||
5686                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
5687                                                 printk(KERN_ERR"%s: Recovery "
5688                                                 "has failed. Power cycle is "
5689                                                 "needed.\n", bp->dev->name);
5690                                                 /* Disconnect this device */
5691                                                 netif_device_detach(bp->dev);
5692                                                 /* Block ifup for all function
5693                                                  * of this ASIC until
5694                                                  * "process kill" or power
5695                                                  * cycle.
5696                                                  */
5697                                                 bnx2x_set_reset_in_progress(bp);
5698                                                 /* Shut down the power */
5699                                                 bnx2x_set_power_state(bp,
5700                                                                 PCI_D3hot);
5701                                                 return;
5702                                         }
5703
5704                                         return;
5705                                 }
5706                         } else { /* non-leader */
5707                                 if (!bnx2x_reset_is_done(bp)) {
5708                                         /* Try to get a LEADER_LOCK HW lock as
5709                                          * long as a former leader may have
5710                                          * been unloaded by the user or
5711                                          * released a leadership by another
5712                                          * reason.
5713                                          */
5714                                         if (bnx2x_trylock_hw_lock(bp,
5715                                             HW_LOCK_RESOURCE_RESERVED_08)) {
5716                                                 /* I'm a leader now! Restart a
5717                                                  * switch case.
5718                                                  */
5719                                                 bp->is_leader = 1;
5720                                                 break;
5721                                         }
5722
5723                                         schedule_delayed_work(&bp->reset_task,
5724                                                                 HZ/10);
5725                                         return;
5726
5727                                 } else { /* A leader has completed
5728                                           * the "process kill". It's an exit
5729                                           * point for a non-leader.
5730                                           */
5731                                         bnx2x_nic_load(bp, LOAD_NORMAL);
5732                                         bp->recovery_state =
5733                                                 BNX2X_RECOVERY_DONE;
5734                                         smp_wmb();
5735                                         return;
5736                                 }
5737                         }
5738                 default:
5739                         return;
5740                 }
5741         }
5742 }
5743
5744 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
5745  * scheduled on a general queue in order to prevent a dead lock.
5746  */
5747 static void bnx2x_reset_task(struct work_struct *work)
5748 {
5749         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5750
5751 #ifdef BNX2X_STOP_ON_ERROR
5752         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5753                   " so reset not done to allow debug dump,\n"
5754          KERN_ERR " you will need to reboot when done\n");
5755         return;
5756 #endif
5757
5758         rtnl_lock();
5759
5760         if (!netif_running(bp->dev))
5761                 goto reset_task_exit;
5762
5763         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5764                 bnx2x_parity_recover(bp);
5765         else {
5766                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5767                 bnx2x_nic_load(bp, LOAD_NORMAL);
5768         }
5769
5770 reset_task_exit:
5771         rtnl_unlock();
5772 }
5773
5774 /* end of nic load/unload */
5775
5776 /*
5777  * Init service functions
5778  */
5779
5780 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5781 {
5782         switch (func) {
5783         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5784         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5785         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5786         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5787         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5788         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5789         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5790         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5791         default:
5792                 BNX2X_ERR("Unsupported function index: %d\n", func);
5793                 return (u32)(-1);
5794         }
5795 }
5796
5797 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5798 {
5799         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5800
5801         /* Flush all outstanding writes */
5802         mmiowb();
5803
5804         /* Pretend to be function 0 */
5805         REG_WR(bp, reg, 0);
5806         /* Flush the GRC transaction (in the chip) */
5807         new_val = REG_RD(bp, reg);
5808         if (new_val != 0) {
5809                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5810                           new_val);
5811                 BUG();
5812         }
5813
5814         /* From now we are in the "like-E1" mode */
5815         bnx2x_int_disable(bp);
5816
5817         /* Flush all outstanding writes */
5818         mmiowb();
5819
5820         /* Restore the original funtion settings */
5821         REG_WR(bp, reg, orig_func);
5822         new_val = REG_RD(bp, reg);
5823         if (new_val != orig_func) {
5824                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5825                           orig_func, new_val);
5826                 BUG();
5827         }
5828 }
5829
/* Disable interrupts, using the pretend-based flow on E1H chips. */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}
5837
5838 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5839 {
5840         u32 val;
5841
5842         /* Check if there is any driver already loaded */
5843         val = REG_RD(bp, MISC_REG_UNPREPARED);
5844         if (val == 0x1) {
5845                 /* Check if it is the UNDI driver
5846                  * UNDI driver initializes CID offset for normal bell to 0x7
5847                  */
5848                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5849                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5850                 if (val == 0x7) {
5851                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5852                         /* save our func */
5853                         int func = BP_FUNC(bp);
5854                         u32 swap_en;
5855                         u32 swap_val;
5856
5857                         /* clear the UNDI indication */
5858                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5859
5860                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
5861
5862                         /* try unload UNDI on port 0 */
5863                         bp->func = 0;
5864                         bp->fw_seq =
5865                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5866                                 DRV_MSG_SEQ_NUMBER_MASK);
5867                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
5868
5869                         /* if UNDI is loaded on the other port */
5870                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5871
5872                                 /* send "DONE" for previous unload */
5873                                 bnx2x_fw_command(bp,
5874                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
5875
5876                                 /* unload UNDI on port 1 */
5877                                 bp->func = 1;
5878                                 bp->fw_seq =
5879                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5880                                         DRV_MSG_SEQ_NUMBER_MASK);
5881                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5882
5883                                 bnx2x_fw_command(bp, reset_code, 0);
5884                         }
5885
5886                         /* now it's safe to release the lock */
5887                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5888
5889                         bnx2x_undi_int_disable(bp, func);
5890
5891                         /* close input traffic and wait for it */
5892                         /* Do not rcv packets to BRB */
5893                         REG_WR(bp,
5894                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5895                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5896                         /* Do not direct rcv packets that are not for MCP to
5897                          * the BRB */
5898                         REG_WR(bp,
5899                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5900                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5901                         /* clear AEU */
5902                         REG_WR(bp,
5903                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5904                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5905                         msleep(10);
5906
5907                         /* save NIG port swap info */
5908                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5909                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5910                         /* reset device */
5911                         REG_WR(bp,
5912                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5913                                0xd3ffffff);
5914                         REG_WR(bp,
5915                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5916                                0x1403);
5917                         /* take the NIG out of reset and restore swap values */
5918                         REG_WR(bp,
5919                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5920                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
5921                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5922                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5923
5924                         /* send unload done to the MCP */
5925                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5926
5927                         /* restore our func and fw_seq */
5928                         bp->func = func;
5929                         bp->fw_seq =
5930                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5931                                 DRV_MSG_SEQ_NUMBER_MASK);
5932
5933                 } else
5934                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5935         }
5936 }
5937
5938 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5939 {
5940         u32 val, val2, val3, val4, id;
5941         u16 pmc;
5942
5943         /* Get the chip revision id and number. */
5944         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5945         val = REG_RD(bp, MISC_REG_CHIP_NUM);
5946         id = ((val & 0xffff) << 16);
5947         val = REG_RD(bp, MISC_REG_CHIP_REV);
5948         id |= ((val & 0xf) << 12);
5949         val = REG_RD(bp, MISC_REG_CHIP_METAL);
5950         id |= ((val & 0xff) << 4);
5951         val = REG_RD(bp, MISC_REG_BOND_ID);
5952         id |= (val & 0xf);
5953         bp->common.chip_id = id;
5954         bp->link_params.chip_id = bp->common.chip_id;
5955         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5956
5957         val = (REG_RD(bp, 0x2874) & 0x55);
5958         if ((bp->common.chip_id & 0x1) ||
5959             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5960                 bp->flags |= ONE_PORT_FLAG;
5961                 BNX2X_DEV_INFO("single port device\n");
5962         }
5963
5964         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5965         bp->common.flash_size = (NVRAM_1MB_SIZE <<
5966                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
5967         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5968                        bp->common.flash_size, bp->common.flash_size);
5969
5970         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5971         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5972         bp->link_params.shmem_base = bp->common.shmem_base;
5973         bp->link_params.shmem2_base = bp->common.shmem2_base;
5974         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
5975                        bp->common.shmem_base, bp->common.shmem2_base);
5976
5977         if (!bp->common.shmem_base ||
5978             (bp->common.shmem_base < 0xA0000) ||
5979             (bp->common.shmem_base >= 0xC0000)) {
5980                 BNX2X_DEV_INFO("MCP not active\n");
5981                 bp->flags |= NO_MCP_FLAG;
5982                 return;
5983         }
5984
5985         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5986         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5987                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5988                 BNX2X_ERROR("BAD MCP validity signature\n");
5989
5990         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
5991         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
5992
5993         bp->link_params.hw_led_mode = ((bp->common.hw_config &
5994                                         SHARED_HW_CFG_LED_MODE_MASK) >>
5995                                        SHARED_HW_CFG_LED_MODE_SHIFT);
5996
5997         bp->link_params.feature_config_flags = 0;
5998         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
5999         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6000                 bp->link_params.feature_config_flags |=
6001                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6002         else
6003                 bp->link_params.feature_config_flags &=
6004                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6005
6006         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6007         bp->common.bc_ver = val;
6008         BNX2X_DEV_INFO("bc_ver %X\n", val);
6009         if (val < BNX2X_BC_VER) {
6010                 /* for now only warn
6011                  * later we might need to enforce this */
6012                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6013                             "please upgrade BC\n", BNX2X_BC_VER, val);
6014         }
6015         bp->link_params.feature_config_flags |=
6016                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6017                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6018         bp->link_params.feature_config_flags |=
6019                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6020                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
6021
6022         if (BP_E1HVN(bp) == 0) {
6023                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6024                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6025         } else {
6026                 /* no WOL capability for E1HVN != 0 */
6027                 bp->flags |= NO_WOL_FLAG;
6028         }
6029         BNX2X_DEV_INFO("%sWoL capable\n",
6030                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
6031
6032         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6033         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6034         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6035         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6036
6037         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6038                  val, val2, val3, val4);
6039 }
6040
6041 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6042                                                     u32 switch_cfg)
6043 {
6044         int cfg_size = 0, idx, port = BP_PORT(bp);
6045
6046         /* Aggregation of supported attributes of all external phys */
6047         bp->port.supported[0] = 0;
6048         bp->port.supported[1] = 0;
6049         switch (bp->link_params.num_phys) {
6050         case 1:
6051                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6052                 cfg_size = 1;
6053                 break;
6054         case 2:
6055                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
6056                 cfg_size = 1;
6057                 break;
6058         case 3:
6059                 if (bp->link_params.multi_phy_config &
6060                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6061                         bp->port.supported[1] =
6062                                 bp->link_params.phy[EXT_PHY1].supported;
6063                         bp->port.supported[0] =
6064                                 bp->link_params.phy[EXT_PHY2].supported;
6065                 } else {
6066                         bp->port.supported[0] =
6067                                 bp->link_params.phy[EXT_PHY1].supported;
6068                         bp->port.supported[1] =
6069                                 bp->link_params.phy[EXT_PHY2].supported;
6070                 }
6071                 cfg_size = 2;
6072                 break;
6073         }
6074
6075         if (!(bp->port.supported[0] || bp->port.supported[1])) {
6076                 BNX2X_ERR("NVRAM config error. BAD phy config."
6077                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
6078                            SHMEM_RD(bp,
6079                            dev_info.port_hw_config[port].external_phy_config),
6080                            SHMEM_RD(bp,
6081                            dev_info.port_hw_config[port].external_phy_config2));
6082                         return;
6083                 }
6084
6085         switch (switch_cfg) {
6086         case SWITCH_CFG_1G:
6087                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6088                                            port*0x10);
6089                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6090                 break;
6091
6092         case SWITCH_CFG_10G:
6093                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6094                                            port*0x18);
6095                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6096
6097                 break;
6098
6099         default:
6100                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6101                           bp->port.link_config[0]);
6102                 return;
6103         }
6104         /* mask what we support according to speed_cap_mask per configuration */
6105         for (idx = 0; idx < cfg_size; idx++) {
6106                 if (!(bp->link_params.speed_cap_mask[idx] &
6107                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6108                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
6109
6110                 if (!(bp->link_params.speed_cap_mask[idx] &
6111                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6112                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
6113
6114                 if (!(bp->link_params.speed_cap_mask[idx] &
6115                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6116                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
6117
6118                 if (!(bp->link_params.speed_cap_mask[idx] &
6119                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6120                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
6121
6122                 if (!(bp->link_params.speed_cap_mask[idx] &
6123                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6124                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
6125                                         SUPPORTED_1000baseT_Full);
6126
6127                 if (!(bp->link_params.speed_cap_mask[idx] &
6128                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6129                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
6130
6131                 if (!(bp->link_params.speed_cap_mask[idx] &
6132                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6133                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
6134
6135         }
6136
6137         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6138                        bp->port.supported[1]);
6139 }
6140
6141 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6142 {
6143         u32 link_config, idx, cfg_size = 0;
6144         bp->port.advertising[0] = 0;
6145         bp->port.advertising[1] = 0;
6146         switch (bp->link_params.num_phys) {
6147         case 1:
6148         case 2:
6149                 cfg_size = 1;
6150                 break;
6151         case 3:
6152                 cfg_size = 2;
6153                 break;
6154         }
6155         for (idx = 0; idx < cfg_size; idx++) {
6156                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6157                 link_config = bp->port.link_config[idx];
6158                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6159         case PORT_FEATURE_LINK_SPEED_AUTO:
6160                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6161                                 bp->link_params.req_line_speed[idx] =
6162                                         SPEED_AUTO_NEG;
6163                                 bp->port.advertising[idx] |=
6164                                         bp->port.supported[idx];
6165                 } else {
6166                         /* force 10G, no AN */
6167                                 bp->link_params.req_line_speed[idx] =
6168                                         SPEED_10000;
6169                                 bp->port.advertising[idx] |=
6170                                         (ADVERTISED_10000baseT_Full |
6171                                                  ADVERTISED_FIBRE);
6172                                 continue;
6173                 }
6174                 break;
6175
6176         case PORT_FEATURE_LINK_SPEED_10M_FULL:
6177                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6178                                 bp->link_params.req_line_speed[idx] =
6179                                         SPEED_10;
6180                                 bp->port.advertising[idx] |=
6181                                         (ADVERTISED_10baseT_Full |
6182                                                 ADVERTISED_TP);
6183                 } else {
6184                         BNX2X_ERROR("NVRAM config error. "
6185                                     "Invalid link_config 0x%x"
6186                                     "  speed_cap_mask 0x%x\n",
6187                                     link_config,
6188                                     bp->link_params.speed_cap_mask[idx]);
6189                         return;
6190                 }
6191                 break;
6192
6193         case PORT_FEATURE_LINK_SPEED_10M_HALF:
6194                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6195                                 bp->link_params.req_line_speed[idx] =
6196                                         SPEED_10;
6197                                 bp->link_params.req_duplex[idx] =
6198                                         DUPLEX_HALF;
6199                                 bp->port.advertising[idx] |=
6200                                         (ADVERTISED_10baseT_Half |
6201                                                 ADVERTISED_TP);
6202                 } else {
6203                         BNX2X_ERROR("NVRAM config error. "
6204                                     "Invalid link_config 0x%x"
6205                                     "  speed_cap_mask 0x%x\n",
6206                                     link_config,
6207                                     bp->link_params.speed_cap_mask[idx]);
6208                         return;
6209                 }
6210                 break;
6211
6212         case PORT_FEATURE_LINK_SPEED_100M_FULL:
6213                         if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
6214                                 bp->link_params.req_line_speed[idx] =
6215                                         SPEED_100;
6216                                 bp->port.advertising[idx] |=
6217                                         (ADVERTISED_100baseT_Full |
6218                                                 ADVERTISED_TP);
6219                 } else {
6220                         BNX2X_ERROR("NVRAM config error. "
6221                                     "Invalid link_config 0x%x"
6222                                     "  speed_cap_mask 0x%x\n",
6223                                     link_config,
6224                                     bp->link_params.speed_cap_mask[idx]);
6225                         return;
6226                 }
6227                 break;
6228
6229         case PORT_FEATURE_LINK_SPEED_100M_HALF:
6230                         if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
6231                                 bp->link_params.req_line_speed[idx] = SPEED_100;
6232                                 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
6233                                 bp->port.advertising[idx] |=
6234                                         (ADVERTISED_100baseT_Half |
6235                                                 ADVERTISED_TP);
6236                 } else {
6237                         BNX2X_ERROR("NVRAM config error. "
6238                                     "Invalid link_config 0x%x"
6239                                     "  speed_cap_mask 0x%x\n",
6240                                     link_config,
6241                                     bp->link_params.speed_cap_mask[idx]);
6242                         return;
6243                 }
6244                 break;
6245
6246         case PORT_FEATURE_LINK_SPEED_1G:
6247                         if (bp->port.supported[idx] &
6248                             SUPPORTED_1000baseT_Full) {
6249                                 bp->link_params.req_line_speed[idx] =
6250                                         SPEED_1000;
6251                                 bp->port.advertising[idx] |=
6252                                         (ADVERTISED_1000baseT_Full |
6253                                                 ADVERTISED_TP);
6254                 } else {
6255                         BNX2X_ERROR("NVRAM config error. "
6256                                     "Invalid link_config 0x%x"
6257                                     "  speed_cap_mask 0x%x\n",
6258                                     link_config,
6259                                     bp->link_params.speed_cap_mask[idx]);
6260                         return;
6261                 }
6262                 break;
6263
6264         case PORT_FEATURE_LINK_SPEED_2_5G:
6265                         if (bp->port.supported[idx] &
6266                             SUPPORTED_2500baseX_Full) {
6267                                 bp->link_params.req_line_speed[idx] =
6268                                         SPEED_2500;
6269                                 bp->port.advertising[idx] |=
6270                                         (ADVERTISED_2500baseX_Full |
6271                                                 ADVERTISED_TP);
6272                 } else {
6273                         BNX2X_ERROR("NVRAM config error. "
6274                                     "Invalid link_config 0x%x"
6275                                     "  speed_cap_mask 0x%x\n",
6276                                     link_config,
6277                                      bp->link_params.speed_cap_mask[idx]);
6278                         return;
6279                 }
6280                 break;
6281
6282         case PORT_FEATURE_LINK_SPEED_10G_CX4:
6283         case PORT_FEATURE_LINK_SPEED_10G_KX4:
6284         case PORT_FEATURE_LINK_SPEED_10G_KR:
6285                         if (bp->port.supported[idx] &
6286                             SUPPORTED_10000baseT_Full) {
6287                                 bp->link_params.req_line_speed[idx] =
6288                                         SPEED_10000;
6289                                 bp->port.advertising[idx] |=
6290                                         (ADVERTISED_10000baseT_Full |
6291                                                 ADVERTISED_FIBRE);
6292                 } else {
6293                         BNX2X_ERROR("NVRAM config error. "
6294                                     "Invalid link_config 0x%x"
6295                                     "  speed_cap_mask 0x%x\n",
6296                                     link_config,
6297                                      bp->link_params.speed_cap_mask[idx]);
6298                         return;
6299                 }
6300                 break;
6301
6302         default:
6303                 BNX2X_ERROR("NVRAM config error. "
6304                             "BAD link speed link_config 0x%x\n",
6305                                   link_config);
6306                         bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
6307                         bp->port.advertising[idx] = bp->port.supported[idx];
6308                 break;
6309         }
6310
6311                 bp->link_params.req_flow_ctrl[idx] = (link_config &
6312                                          PORT_FEATURE_FLOW_CONTROL_MASK);
6313                 if ((bp->link_params.req_flow_ctrl[idx] ==
6314                      BNX2X_FLOW_CTRL_AUTO) &&
6315                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
6316                         bp->link_params.req_flow_ctrl[idx] =
6317                                 BNX2X_FLOW_CTRL_NONE;
6318                 }
6319
6320                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
6321                                " 0x%x advertising 0x%x\n",
6322                                bp->link_params.req_line_speed[idx],
6323                                bp->link_params.req_duplex[idx],
6324                                bp->link_params.req_flow_ctrl[idx],
6325                                bp->port.advertising[idx]);
6326         }
6327 }
6328
6329 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6330 {
6331         mac_hi = cpu_to_be16(mac_hi);
6332         mac_lo = cpu_to_be32(mac_lo);
6333         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6334         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6335 }
6336
6337 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6338 {
6339         int port = BP_PORT(bp);
6340         u32 val, val2;
6341         u32 config;
6342         u32 ext_phy_type, ext_phy_config;;
6343
6344         bp->link_params.bp = bp;
6345         bp->link_params.port = port;
6346
6347         bp->link_params.lane_config =
6348                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6349
6350         bp->link_params.speed_cap_mask[0] =
6351                 SHMEM_RD(bp,
6352                          dev_info.port_hw_config[port].speed_capability_mask);
6353         bp->link_params.speed_cap_mask[1] =
6354                 SHMEM_RD(bp,
6355                          dev_info.port_hw_config[port].speed_capability_mask2);
6356         bp->port.link_config[0] =
6357                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6358
6359         bp->port.link_config[1] =
6360                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
6361
6362         bp->link_params.multi_phy_config =
6363                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
6364         /* If the device is capable of WoL, set the default state according
6365          * to the HW
6366          */
6367         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6368         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6369                    (config & PORT_FEATURE_WOL_ENABLED));
6370
6371         BNX2X_DEV_INFO("lane_config 0x%08x"
6372                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
6373                        bp->link_params.lane_config,
6374                        bp->link_params.speed_cap_mask[0],
6375                        bp->port.link_config[0]);
6376
6377         bp->link_params.switch_cfg = (bp->port.link_config[0] &
6378                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
6379         bnx2x_phy_probe(&bp->link_params);
6380         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6381
6382         bnx2x_link_settings_requested(bp);
6383
6384         /*
6385          * If connected directly, work with the internal PHY, otherwise, work
6386          * with the external PHY
6387          */
6388         ext_phy_config =
6389                 SHMEM_RD(bp,
6390                          dev_info.port_hw_config[port].external_phy_config);
6391         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6392         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6393                 bp->mdio.prtad = bp->port.phy_addr;
6394
6395         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6396                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6397                 bp->mdio.prtad =
6398                         XGXS_EXT_PHY_ADDR(ext_phy_config);
6399
6400         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6401         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6402         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6403         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6404         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6405
6406 #ifdef BCM_CNIC
6407         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6408         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6409         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6410 #endif
6411 }
6412
/* Read per-function HW configuration from the NVRAM-backed shared memory:
 * multi-function (E1H MF) mode detection, the firmware mailbox sequence
 * number, and the function's MAC address.
 * Returns 0 on success or -EPERM on an invalid MF configuration.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	/* Detect MF mode: on E1H with a management CPU present, a
	 * non-default outer-VLAN (E1HOV) tag on function 0 means the
	 * board is configured for multi-function operation */
	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			/* In MF mode this function must have its own valid
			 * E1HOV tag; a default (unset) tag is fatal */
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* Single-function mode is only legal on VN 0 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* Resume the driver<->MCP mailbox sequence number */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* In MF mode the per-function MAC comes from the MF config:
		 * mac_upper holds the 2 high bytes, mac_lower the 4 low.
		 * Default (all-ones) values mean "not configured" and the
		 * port MAC set by bnx2x_get_port_hwinfo() is kept. */
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
6494
/* Read the vendor-specific version string from the PCI VPD area into
 * bp->fw_ver.  Only boards whose VPD manufacturer-ID keyword matches the
 * Dell vendor ID carry this field; on any read/parse failure bp->fw_ver
 * is simply left zeroed (best effort, no error is reported).
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* Locate the read-only (RO) large-resource data block */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	/* RO block must lie entirely inside the data we read */
	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	/* Identify the board vendor via the manufacturer-ID keyword */
	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	/* match the vendor ID in either lower- or upper-case hex form */
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		/* Vendor-specific keyword V0 holds the version string */
		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bp->fw_ver was zeroed above; a trailing space is
			 * appended as a separator (len < 32 bounds the copy
			 * within fw_ver) */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
6558
/* One-time per-device software initialization: locks, work items, HW/NVRAM
 * configuration, module-parameter-driven settings (multi-queue, TPA,
 * interrupt mode) and the periodic timer.
 * Returns the status of bnx2x_get_hwinfo() (0 or a negative errno).
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	/* NOTE(review): rc is only returned at the very end - initialization
	 * deliberately continues even if bnx2x_get_hwinfo() failed */
	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		/* RSS requires MSI-X; fall back to a single queue */
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not used on E1 chips */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* periodic timer; the "poll" module parameter overrides the
	 * default interval (slower on emulation/FPGA revisions) */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
6641
6642
6643 /****************************************************************************
6644 * General service functions
6645 ****************************************************************************/
6646
6647 /* called with rtnl_lock */
6648 static int bnx2x_open(struct net_device *dev)
6649 {
6650         struct bnx2x *bp = netdev_priv(dev);
6651
6652         netif_carrier_off(dev);
6653
6654         bnx2x_set_power_state(bp, PCI_D0);
6655
6656         if (!bnx2x_reset_is_done(bp)) {
6657                 do {
6658                         /* Reset MCP mail box sequence if there is on going
6659                          * recovery
6660                          */
6661                         bp->fw_seq = 0;
6662
6663                         /* If it's the first function to load and reset done
6664                          * is still not cleared it may mean that. We don't
6665                          * check the attention state here because it may have
6666                          * already been cleared by a "common" reset but we
6667                          * shell proceed with "process kill" anyway.
6668                          */
6669                         if ((bnx2x_get_load_cnt(bp) == 0) &&
6670                                 bnx2x_trylock_hw_lock(bp,
6671                                 HW_LOCK_RESOURCE_RESERVED_08) &&
6672                                 (!bnx2x_leader_reset(bp))) {
6673                                 DP(NETIF_MSG_HW, "Recovered in open\n");
6674                                 break;
6675                         }
6676
6677                         bnx2x_set_power_state(bp, PCI_D3hot);
6678
6679                         printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6680                         " completed yet. Try again later. If u still see this"
6681                         " message after a few retries then power cycle is"
6682                         " required.\n", bp->dev->name);
6683
6684                         return -EAGAIN;
6685                 } while (0);
6686         }
6687
6688         bp->recovery_state = BNX2X_RECOVERY_DONE;
6689
6690         return bnx2x_nic_load(bp, LOAD_OPEN);
6691 }
6692
/* called with rtnl_lock */
/* net_device_ops .ndo_stop: unload the NIC and put the device into a
 * low-power state.  Always succeeds. */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
6704
/* called with netif_tx_lock from dev_mcast.c */
/* Configure RX filtering according to dev->flags and the multicast list:
 * promiscuous / all-multicast / selective multicast.  On E1 chips the
 * multicast MACs are programmed into the CAM via a slow-path ramrod; on
 * E1H a 256-bit hash filter is written to MC_HASH registers instead. */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* Device must be fully up before touching the filters */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 falls back to all-multi when the list exceeds its CAM size */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* Fill one CAM entry per multicast address; MAC
			 * bytes are stored as byte-swapped 16-bit words */
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			/* Invalidate any leftover entries from a previously
			 * longer list (stop at the first already-invalid) */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			/* CAM offset for this port's multicast region */
			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			/* Post the slow-path ramrod that applies the table */
			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* Hash each address (top CRC32c byte) into a bit of
			 * the 256-bit filter */
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
6825
6826
6827 /* called with rtnl_lock */
6828 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6829                            int devad, u16 addr)
6830 {
6831         struct bnx2x *bp = netdev_priv(netdev);
6832         u16 value;
6833         int rc;
6834
6835         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6836            prtad, devad, addr);
6837
6838         /* The HW expects different devad if CL22 is used */
6839         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6840
6841         bnx2x_acquire_phy_lock(bp);
6842         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
6843         bnx2x_release_phy_lock(bp);
6844         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6845
6846         if (!rc)
6847                 rc = value;
6848         return rc;
6849 }
6850
6851 /* called with rtnl_lock */
6852 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6853                             u16 addr, u16 value)
6854 {
6855         struct bnx2x *bp = netdev_priv(netdev);
6856         int rc;
6857
6858         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6859                            " value 0x%x\n", prtad, devad, addr, value);
6860
6861         /* The HW expects different devad if CL22 is used */
6862         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6863
6864         bnx2x_acquire_phy_lock(bp);
6865         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
6866         bnx2x_release_phy_lock(bp);
6867         return rc;
6868 }
6869
/* called with rtnl_lock */
/* net_device_ops .ndo_do_ioctl: handle MII ioctls (SIOCGMIIREG etc.) by
 * delegating to the mdio library, which calls back into the driver's
 * mdio_read/mdio_write hooks.  Fails with -EAGAIN if the interface is
 * not up. */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
6884
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly with the device IRQ
 * disabled, so netconsole/netpoll can drive the device when interrupt
 * delivery is unavailable */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6895
/* net_device_ops dispatch table registered for every bnx2x netdev */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
6913
/*
 * bnx2x_init_dev - low-level PCI and net_device setup for a probed device.
 *
 * Enables the PCI device, validates and maps BAR0 (register space) and
 * BAR2 (doorbells), configures the DMA masks, cleans hardware indirect
 * address state, and fills in netdev callbacks, feature flags and the
 * MDIO emulation hooks.  On any failure the goto chain unwinds exactly
 * the resources acquired so far.  Returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number selects the per-function registers below */
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 must be a memory BAR (register window) */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* BAR2 must be a memory BAR (doorbell window) */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Regions are shared between functions; only the first enable
	 * of this device claims them (mirrored in the error unwind). */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else the platform
	 * cannot support this device at all. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* Mirror the feature set onto VLAN devices stacked on top */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
7083
7084 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7085                                                  int *width, int *speed)
7086 {
7087         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7088
7089         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7090
7091         /* return value of 1=2.5GHz 2=5GHz */
7092         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7093 }
7094
7095 static int bnx2x_check_firmware(struct bnx2x *bp)
7096 {
7097         const struct firmware *firmware = bp->firmware;
7098         struct bnx2x_fw_file_hdr *fw_hdr;
7099         struct bnx2x_fw_file_section *sections;
7100         u32 offset, len, num_ops;
7101         u16 *ops_offsets;
7102         int i;
7103         const u8 *fw_ver;
7104
7105         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7106                 return -EINVAL;
7107
7108         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7109         sections = (struct bnx2x_fw_file_section *)fw_hdr;
7110
7111         /* Make sure none of the offsets and sizes make us read beyond
7112          * the end of the firmware data */
7113         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7114                 offset = be32_to_cpu(sections[i].offset);
7115                 len = be32_to_cpu(sections[i].len);
7116                 if (offset + len > firmware->size) {
7117                         dev_err(&bp->pdev->dev,
7118                                 "Section %d length is out of bounds\n", i);
7119                         return -EINVAL;
7120                 }
7121         }
7122
7123         /* Likewise for the init_ops offsets */
7124         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7125         ops_offsets = (u16 *)(firmware->data + offset);
7126         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7127
7128         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7129                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7130                         dev_err(&bp->pdev->dev,
7131                                 "Section offset %d is out of bounds\n", i);
7132                         return -EINVAL;
7133                 }
7134         }
7135
7136         /* Check FW version */
7137         offset = be32_to_cpu(fw_hdr->fw_version.offset);
7138         fw_ver = firmware->data + offset;
7139         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7140             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7141             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7142             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7143                 dev_err(&bp->pdev->dev,
7144                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7145                        fw_ver[0], fw_ver[1], fw_ver[2],
7146                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7147                        BCM_5710_FW_MINOR_VERSION,
7148                        BCM_5710_FW_REVISION_VERSION,
7149                        BCM_5710_FW_ENGINEERING_VERSION);
7150                 return -EINVAL;
7151         }
7152
7153         return 0;
7154 }
7155
7156 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7157 {
7158         const __be32 *source = (const __be32 *)_source;
7159         u32 *target = (u32 *)_target;
7160         u32 i;
7161
7162         for (i = 0; i < n/4; i++)
7163                 target[i] = be32_to_cpu(source[i]);
7164 }
7165
7166 /*
7167    Ops array is stored in the following format:
7168    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7169  */
7170 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7171 {
7172         const __be32 *source = (const __be32 *)_source;
7173         struct raw_op *target = (struct raw_op *)_target;
7174         u32 i, j, tmp;
7175
7176         for (i = 0, j = 0; i < n/8; i++, j += 2) {
7177                 tmp = be32_to_cpu(source[j]);
7178                 target[i].op = (tmp >> 24) & 0xff;
7179                 target[i].offset = tmp & 0xffffff;
7180                 target[i].raw_data = be32_to_cpu(source[j + 1]);
7181         }
7182 }
7183
7184 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7185 {
7186         const __be16 *source = (const __be16 *)_source;
7187         u16 *target = (u16 *)_target;
7188         u32 i;
7189
7190         for (i = 0; i < n/2; i++)
7191                 target[i] = be16_to_cpu(source[i]);
7192 }
7193
/* Allocate bp->arr sized from the firmware header and fill it by running
 * 'func' over the corresponding firmware section.  On allocation failure
 * jumps to 'lbl', relying on the caller's goto-cleanup chain.  'fw_hdr'
 * and 'bp' must be in scope at the expansion site. */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
7205
/*
 * bnx2x_init_firmware - load and unpack the chip firmware image.
 *
 * Requests the per-chip firmware file, validates it, converts the
 * big-endian init data/ops/offsets sections into host-order arrays
 * owned by 'bp', and records pointers to the per-STORM microcode
 * sections inside the (still-held) firmware blob.
 *
 * The error labels unwind in reverse allocation order; the labels are
 * the ones BNX2X_ALLOC_AND_SET jumps to on allocation failure.
 * Returns 0 on success or a negative errno.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: pointers straight into the firmware blob,
	 * which therefore must stay held while these are in use */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
7277
7278
/*
 * bnx2x_init_one - PCI probe entry point.
 *
 * Allocates the multi-queue net_device, performs low-level PCI/BAR
 * setup (bnx2x_init_dev), driver-private initialization (bnx2x_init_bp),
 * registers the netdev and prints the probe banner.  The error path
 * mirrors bnx2x_init_dev's internal unwind since that function has
 * already torn down its own state on failure.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* init_dev already released everything it acquired */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Last enable of the device owns the regions */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
7342
/*
 * bnx2x_remove_one - PCI remove entry point; undo bnx2x_init_one.
 *
 * Unregisters the netdev first (quiesces traffic), cancels the pending
 * reset work, then unmaps BARs and releases PCI resources.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7373
/*
 * bnx2x_eeh_nic_unload - minimal unload after a PCI bus error.
 *
 * Unlike the normal unload path, the device may be unreachable, so only
 * software state is torn down: stop the stack, release IRQs, invalidate
 * the E1 multicast CAM shadow, and free all rings and driver memory.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* E1 keeps a software shadow of the multicast CAM; mark every
	 * entry invalid so a later load starts clean */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
7412
/*
 * bnx2x_eeh_recover - re-sync with the management CPU after a PCI reset.
 *
 * Re-reads the shared-memory base, verifies the MCP is alive (shmem in
 * its expected 0xA0000-0xC0000 window and validity bits set) and
 * refreshes the firmware mailbox sequence number used for MCP commands.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* Outside this window the MCP is considered dead; run MCP-less */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
7442
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  Detaches the netdev, performs an
 * error-path unload if the interface was running, and asks the PCI
 * core for a slot reset (or disconnect on permanent failure).
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* Permanent failure: nothing to recover, tell the core to drop us */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
7476
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Re-enables
 * the device, restores the saved config space, and powers the device
 * back to D0 if the interface was up; the actual reload happens later
 * in bnx2x_io_resume().
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* Config space was saved at probe time in bnx2x_init_dev() */
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
7507
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-syncs with the MCP, reloads
 * the NIC if it was running, and re-attaches the netdev.  Bails out if
 * a parity-error recovery is still in progress elsewhere.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
7536
/* PCI AER/EEH recovery callbacks (see Documentation/PCI recovery flow) */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset	= bnx2x_io_slot_reset,
	.resume		= bnx2x_io_resume,
};
7542
/* Top-level PCI driver registration: probe/remove, PM and error hooks */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
7552
7553 static int __init bnx2x_init(void)
7554 {
7555         int ret;
7556
7557         pr_info("%s", version);
7558
7559         bnx2x_wq = create_singlethread_workqueue("bnx2x");
7560         if (bnx2x_wq == NULL) {
7561                 pr_err("Cannot create workqueue\n");
7562                 return -ENOMEM;
7563         }
7564
7565         ret = pci_register_driver(&bnx2x_pci_driver);
7566         if (ret) {
7567                 pr_err("Cannot register driver\n");
7568                 destroy_workqueue(bnx2x_wq);
7569         }
7570         return ret;
7571 }
7572
/* Module exit: unregister the driver first so no work can be queued,
 * then tear down the workqueue. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
7579
7580 module_init(bnx2x_init);
7581 module_exit(bnx2x_cleanup);
7582
7583 #ifdef BCM_CNIC
7584
/* count denotes the number of new completions we have seen */
/* Drain queued CNIC kernel work-queue entries onto the slow-path queue:
 * credits freed by 'count' completions are used to move entries from
 * the software KWQ ring to the hardware SPQ, under spq_lock. */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Each completion returns one in-flight credit */
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer with wrap-around at ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
7620
/*
 * bnx2x_cnic_sp_queue - accept 16-byte kernel work-queue entries from CNIC.
 *
 * Copies up to 'count' KWQEs into the software ring (stopping early if
 * the ring fills) and kicks the slow-path post if credits are available.
 * Returns the number of entries actually queued.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full: return a short count, caller may retry */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer with wrap-around at ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* If there is credit left, push queued entries out immediately */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
7663
/* Deliver a control event to the registered CNIC driver, if any.
 * Process context: serializes against register/unregister via
 * cnic_mutex.  Returns the callback's result, or 0 if no CNIC
 * driver is registered. */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
7677
/* Same as bnx2x_cnic_ctl_send() but callable from atomic/BH context:
 * uses RCU instead of the mutex to safely read cnic_ops. */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
7691
7692 /*
7693  * for commands that have no data
7694  */
7695 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7696 {
7697         struct cnic_ctl_info ctl = {0};
7698
7699         ctl.cmd = cmd;
7700
7701         return bnx2x_cnic_ctl_send(bp, &ctl);
7702 }
7703
7704 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7705 {
7706         struct cnic_ctl_info ctl;
7707
7708         /* first we tell CNIC and only then we count this as a completion */
7709         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7710         ctl.data.comp.cid = cid;
7711
7712         bnx2x_cnic_ctl_send_bh(bp, &ctl);
7713         bnx2x_cnic_sp_post(bp, 1);
7714 }
7715
/*
 * bnx2x_drv_ctl - control requests from the CNIC driver back into bnx2x.
 *
 * Dispatches on ctl->cmd: ILT context-table writes, completion credits,
 * and per-client L2 rx-mode enable/disable (the latter two L2 commands
 * are invoked with rtnl held, per the CNIC contract).  Returns 0 or
 * -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Include this client in the storm rx-mode mask */
		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
7762
/* Publish interrupt resources to CNIC: under MSI-X, CNIC gets vector 1
 * of the MSI-X table; otherwise it shares the line interrupt.  Two
 * status blocks are exported - CNIC's own and the default one. */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
7782
/*
 * bnx2x_register_cnic - attach the CNIC (iSCSI/FCoE offload) driver.
 *
 * Allocates the software KWQ ring, initializes the CNIC status block
 * and IRQ info, programs the iSCSI MAC, and finally publishes 'ops'
 * via RCU so the fast path can see the registration.  Returns 0,
 * -EINVAL for NULL ops, -EBUSY while interrupts are gated, or -ENOMEM.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* Refuse to register while the device is gated (e.g. resetting) */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* One page worth of 16-byte KWQEs forms the software ring */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish last: readers use rcu_dereference(bp->cnic_ops) */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
7820
/*
 * bnx2x_unregister_cnic - detach the CNIC driver.
 *
 * Clears the iSCSI MAC if it was set, unpublishes cnic_ops under the
 * mutex, then waits for all RCU readers (the BH-context senders) to
 * drain via synchronize_rcu() before freeing the KWQ ring.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* No reader can still hold cnic_ops after this returns */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
7840
/*
 * bnx2x_cnic_probe - hand the CNIC module its view of this device.
 *
 * Fills the cnic_eth_dev descriptor with hardware handles (BAR
 * mappings, context-table layout, starting CID) and the callback
 * vectors CNIC uses to submit work and register itself.  Returns a
 * pointer into bp's private data; lifetime follows the netdev.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
7864
7865 #endif /* BCM_CNIC */
7866