/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

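/*
 * Illustrative note (a sketch, not authoritative hardware documentation):
 * the pair above tunnels GRC register accesses through the PCI config
 * window, the only access path available before the device BARs are
 * usable.  A caller would simply do:
 *
 *      bnx2x_reg_wr_ind(bp, reg_offset, val);
 *      val = bnx2x_reg_rd_ind(bp, reg_offset);
 *
 * The trailing write of PCICFG_VENDOR_ID_OFFSET parks the window so a
 * later stray config access cannot alias onto the last GRC address.
 */
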
const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

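/*
 * Usage sketch (illustrative; "mapping" and "dst_grc_addr" are
 * hypothetical names): staging dwords in a DMA-coherent buffer and
 * copying them into device memory.  Note that len32 counts 32-bit words,
 * not bytes, and the call sleeps (mutex plus completion polling), so it
 * must not be made from atomic context:
 *
 *      buf[0] = val;                                   (fill staged data)
 *      bnx2x_write_dmae(bp, mapping, dst_grc_addr, 4);
 */
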
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

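/*
 * Worked example (assuming, purely for illustration, a DMAE write limit
 * of 0x400 dwords): a request of len = 0x500 issues one full 0x400-dword
 * DMAE, advances both addresses by 0x1000 bytes (0x400 * 4), then writes
 * the remaining 0x100 dwords.  "offset" is in bytes while "len" counts
 * dwords, which is why the two are updated with different scales.
 */
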
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

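/*
 * Layout note, recoverable from the loops above: each STORM assert entry
 * is four consecutive dwords (row0..row3 at offsets +0, +4, +8, +12), and
 * a row0 of COMMON_ASM_INVALID_ASSERT_OPCODE terminates the list, which
 * is why every scan breaks at the first invalid entry instead of walking
 * the full STROM_ASSERT_ARRAY_SIZE array.
 */
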
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

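/*
 * Reading note: the MCP scratchpad is dumped as a ring buffer.  "mark"
 * points at the oldest valid data, so the first loop above prints from
 * mark up to shmem_base, and the second wraps around from the start of
 * the dump window back up to mark, emitting eight dwords (NUL-terminated
 * in data[8]) per pr_cont() chunk.
 */
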
void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

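/*
 * Summary of the HC_CONFIG programming above (derived from this code,
 * not from hardware documentation):
 *
 *      MSI-X: single-ISR and INT# line off; MSI/MSI-X and attn on
 *      MSI:   INT# line off; single-ISR, MSI/MSI-X and attn on
 *      INTx:  all four bits set for the first write, then MSI/MSI-X is
 *             stripped so the second write leaves only the line path
 */
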
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

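/*
 * Ordering sketch for the teardown above: intr_sem is raised first so any
 * ISR that still fires bails out early, the hardware is then optionally
 * silenced, synchronize_irq() waits out handlers already in flight, and
 * only then is sp_task cancelled and the workqueue flushed.  Reordering
 * these steps could let a late interrupt re-queue sp_task after the
 * flush.
 */
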
/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}

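/*
 * The try-lock works without any software spinlock: writing the resource
 * bit to the control register's "set" address (base + 4) and reading the
 * base register back leaves the bit set only for the winning function,
 * so a single write-then-read decides ownership atomically in hardware.
 */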

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct bnx2x *bp = netdev_priv(dev_instance);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;
        int i;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                mask = 0x2 << fp->sb_id;
                if (status & mask) {
                        /* Handle Rx and Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                        prefetch(&fp->status_blk->u_status_block.
                                                status_block_index);
                        prefetch(fp->tx_cons_sb);
                        prefetch(&fp->status_blk->c_status_block.
                                                status_block_index);
                        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
                }
        }

#ifdef BCM_CNIC
        mask = 0x2 << CNIC_SB_ID(bp);
        if (status & (mask | 0x1)) {
                struct cnic_ops *c_ops = NULL;

                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();

                status &= ~mask;
        }
#endif

        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status))
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        return IRQ_HANDLED;
}

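/*
 * Status word layout as decoded above: bit 0 belongs to the default
 * (slowpath) status block and each fastpath status block owns the bit at
 * (0x2 << sb_id).  Whatever remains set after all known sources have
 * been handled is logged as an unknown interrupt.
 */
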
/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 5 seconds every 5ms */
        for (cnt = 0; cnt < 1000; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, hw_lock_control_reg, resource_bit);
        return 0;
}

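/*
 * Typical pairing (sketch; MDIO is just one example resource):
 *
 *      rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 *      if (!rc) {
 *              ... touch the shared resource ...
 *              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 *      }
 *
 * Acquire polls for up to 5 seconds before giving up with -EAGAIN, and
 * release returns -EFAULT if the caller never actually held the lock.
 */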

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;
        int value;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        /* read GPIO value */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO);

        /* get the requested pin value */
        if ((gpio_reg & gpio_mask) == gpio_mask)
                value = 1;
        else
                value = 0;

        DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

        return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

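/*
 * Usage sketch: driving GPIO 1 on the current port low, then releasing
 * it back to its floating (input) state:
 *
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_INPUT_HI_Z, BP_PORT(bp));
 *
 * The SET/CLR/FLOAT positions behave as write-one-to-apply fields, which
 * is why the read-modify-write above preserves only the FLOAT bits.
 */
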
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO int */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
                DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
                                   "output low\n", gpio_num, gpio_shift);
                /* clear SET and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
                DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
                                   "output high\n", gpio_num, gpio_shift);
                /* clear CLR and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
        switch (bp->link_vars.ieee_fc &
                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
                bp->port.advertising |= (ADVERTISED_Asym_Pause |
                                         ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
                bp->port.advertising |= ADVERTISED_Asym_Pause;
                break;

        default:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        }
}


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
        if (!BP_NOMCP(bp)) {
                u8 rc;

                /* Initialize link parameters structure variables */
                /* It is recommended to turn off RX FC for jumbo frames
                   for better performance */
                if (bp->dev->mtu > 5000)
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
                else
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

                bnx2x_acquire_phy_lock(bp);

                if (load_mode == LOAD_DIAG)
                        bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);

                if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
                        bnx2x_link_report(bp);
                }

                return rc;
        }
        BNX2X_ERR("Bootcode is missing - can not initialize link\n");
        return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
        u8 rc = 0;

        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not test link\n");

        return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
        u32 r_param = bp->link_vars.line_speed / 8;
        u32 fair_periodic_timeout_usec;
        u32 t_fair;

        memset(&(bp->cmng.rs_vars), 0,
               sizeof(struct rate_shaping_vars_per_port));
        memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

        /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
        bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

        /* this is the threshold below which no timer arming will occur;
           the 1.25 coefficient makes the threshold a little bigger than
           the real time, to compensate for timer inaccuracy */
        bp->cmng.rs_vars.rs_threshold =
                                (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

        /* resolution of fairness timer */
        fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
        /* for 10G it is 1000 usec; for 1G it is 10000 usec */
        t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

        /* this is the threshold below which we won't arm the timer anymore */
        bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

        /* we multiply by 1e3/8 to get bytes/msec; the credits must not
           exceed t_fair*FAIR_MEM (the algorithm resolution) */
        bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
        /* since each tick is 4 usec */
        bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

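/*
 * Worked example at 10G, using only values fixed by the comments above
 * (RS_PERIODIC_TIMEOUT_USEC = 100 usec, t_fair = 1000 usec at 10G):
 * line_speed = 10000 Mbps gives r_param = 10000 / 8 = 1250 bytes/usec,
 * so rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes, the traffic
 * level below which the rate-shaping timer is no longer armed.
 */
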
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
        int all_zero = 1;
        int port = BP_PORT(bp);
        int vn;

        bp->vn_weight_sum = 0;
        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
                int func = 2*vn + port;
                u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

                /* Skip hidden vns */
                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
                        continue;

                /* If min rate is zero - set it to 1 */
                if (!vn_min_rate)
                        vn_min_rate = DEF_MIN_RATE;
                else
                        all_zero = 0;

                bp->vn_weight_sum += vn_min_rate;
        }

        /* ... only if all min rates are zeros - disable fairness */
        if (all_zero) {
                bp->cmng.flags.cmng_enables &=
                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
                DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
                   "  fairness will be disabled\n");
        } else
                bp->cmng.flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

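/*
 * Normalization example: with three visible VNs whose configured minimums
 * scale to 0, 2500 and 7500, the zero entry is promoted to DEF_MIN_RATE
 * and vn_weight_sum becomes DEF_MIN_RATE + 10000; fairness is disabled
 * outright only when every visible VN is configured to zero.
 */
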
1409 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1410 {
1411         struct rate_shaping_vars_per_vn m_rs_vn;
1412         struct fairness_vars_per_vn m_fair_vn;
1413         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1414         u16 vn_min_rate, vn_max_rate;
1415         int i;
1416
1417         /* If function is hidden - set min and max to zeroes */
1418         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1419                 vn_min_rate = 0;
1420                 vn_max_rate = 0;
1421
1422         } else {
1423                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1424                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1425                 /* If min rate is zero - set it to 1 */
1426                 if (!vn_min_rate)
1427                         vn_min_rate = DEF_MIN_RATE;
1428                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1429                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1430         }
1431         DP(NETIF_MSG_IFUP,
1432            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1433            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1434
1435         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1436         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1437
1438         /* global vn counter - maximal Mbps for this vn */
1439         m_rs_vn.vn_counter.rate = vn_max_rate;
1440
1441         /* quota - number of bytes transmitted in this period */
1442         m_rs_vn.vn_counter.quota =
1443                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1444
1445         if (bp->vn_weight_sum) {
1446                 /* credit for each period of the fairness algorithm:
1447            number of bytes in T_FAIR (the vns share the port rate).
1448                    vn_weight_sum should not be larger than 10000, thus
1449                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1450                    than zero */
1451                 m_fair_vn.vn_credit_delta =
1452                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1453                                                    (8 * bp->vn_weight_sum))),
1454                               (bp->cmng.fair_vars.fair_threshold * 2));
1455                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1456                    m_fair_vn.vn_credit_delta);
1457         }
1458
1459         /* Store it to internal memory */
1460         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1461                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1462                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1463                        ((u32 *)(&m_rs_vn))[i]);
1464
1465         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1466                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1467                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1468                        ((u32 *)(&m_fair_vn))[i]);
1469 }
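
/* Illustrative arithmetic: vn_max_rate is in Mbps and the period in
 * usec, so Mbps x usec gives bits and the division by 8 gives bytes.
 * With an assumed vn_max_rate of 10000 and RS_PERIODIC_TIMEOUT_USEC of
 * 50 (example value only), the quota above would be
 * 10000 * 50 / 8 = 62500 bytes per rate-shaping period.
 */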
1470
1471
1472 /* This function is called upon link interrupt */
1473 static void bnx2x_link_attn(struct bnx2x *bp)
1474 {
1475         u32 prev_link_status = bp->link_vars.link_status;
1476         /* Make sure that we are synced with the current statistics */
1477         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1478
1479         bnx2x_link_update(&bp->link_params, &bp->link_vars);
1480
1481         if (bp->link_vars.link_up) {
1482
1483                 /* dropless flow control */
1484                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1485                         int port = BP_PORT(bp);
1486                         u32 pause_enabled = 0;
1487
1488                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1489                                 pause_enabled = 1;
1490
1491                         REG_WR(bp, BAR_USTRORM_INTMEM +
1492                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1493                                pause_enabled);
1494                 }
1495
1496                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1497                         struct host_port_stats *pstats;
1498
1499                         pstats = bnx2x_sp(bp, port_stats);
1500                         /* reset old bmac stats */
1501                         memset(&(pstats->mac_stx[0]), 0,
1502                                sizeof(struct mac_stx));
1503                 }
1504                 if (bp->state == BNX2X_STATE_OPEN)
1505                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1506         }
1507
1508         /* indicate link status only if link status actually changed */
1509         if (prev_link_status != bp->link_vars.link_status)
1510                 bnx2x_link_report(bp);
1511
1512         if (IS_E1HMF(bp)) {
1513                 int port = BP_PORT(bp);
1514                 int func;
1515                 int vn;
1516
1517                 /* Set the attention towards other drivers on the same port */
1518                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1519                         if (vn == BP_E1HVN(bp))
1520                                 continue;
1521
1522                         func = ((vn << 1) | port);
1523                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1524                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1525                 }
1526
1527                 if (bp->link_vars.link_up) {
1528                         int i;
1529
1530                         /* Init rate shaping and fairness contexts */
1531                         bnx2x_init_port_minmax(bp);
1532
1533                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1534                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1535
1536                         /* Store it to internal memory */
1537                         for (i = 0;
1538                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
1539                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1540                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1541                                        ((u32 *)(&bp->cmng))[i]);
1542                 }
1543         }
1544 }
1545
1546 void bnx2x__link_status_update(struct bnx2x *bp)
1547 {
1548         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1549                 return;
1550
1551         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1552
1553         if (bp->link_vars.link_up)
1554                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1555         else
1556                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1557
1558         bnx2x_calc_vn_weight_sum(bp);
1559
1560         /* indicate link status */
1561         bnx2x_link_report(bp);
1562 }
1563
1564 static void bnx2x_pmf_update(struct bnx2x *bp)
1565 {
1566         int port = BP_PORT(bp);
1567         u32 val;
1568
1569         bp->port.pmf = 1;
1570         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1571
1572         /* enable nig attention */
1573         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1574         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1575         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1576
1577         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1578 }
1579
1580 /* end of Link */
1581
1582 /* slow path */
1583
1584 /*
1585  * General service functions
1586  */
1587
1588 /* send the MCP a request, block until there is a reply */
1589 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1590 {
1591         int func = BP_FUNC(bp);
1592         u32 seq = ++bp->fw_seq;
1593         u32 rc = 0;
1594         u32 cnt = 1;
1595         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1596
1597         mutex_lock(&bp->fw_mb_mutex);
1598         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1599         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1600
1601         do {
1602                 /* let the FW do its magic ... */
1603                 msleep(delay);
1604
1605                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1606
1607                 /* Give the FW up to 5 seconds (500 * 10ms) */
1608         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1609
1610         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1611            cnt*delay, rc, seq);
1612
1613         /* is this a reply to our command? */
1614         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1615                 rc &= FW_MSG_CODE_MASK;
1616         else {
1617                 /* FW BUG! */
1618                 BNX2X_ERR("FW failed to respond!\n");
1619                 bnx2x_fw_dump(bp);
1620                 rc = 0;
1621         }
1622         mutex_unlock(&bp->fw_mb_mutex);
1623
1624         return rc;
1625 }
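
/* Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *      u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *      if (!rc)
 *              return -EBUSY;          (no reply - treat as MCP failure)
 *
 * The sequence number is handled internally, so a caller passes only the
 * command code and tests the reply already masked by FW_MSG_CODE_MASK.
 * DRV_MSG_CODE_LOAD_REQ is used here only as an assumed example command.
 */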
1626
1627 static void bnx2x_e1h_disable(struct bnx2x *bp)
1628 {
1629         int port = BP_PORT(bp);
1630
1631         netif_tx_disable(bp->dev);
1632
1633         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1634
1635         netif_carrier_off(bp->dev);
1636 }
1637
1638 static void bnx2x_e1h_enable(struct bnx2x *bp)
1639 {
1640         int port = BP_PORT(bp);
1641
1642         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1643
1644         /* Tx queues need only be re-enabled */
1645         netif_tx_wake_all_queues(bp->dev);
1646
1647         /*
1648          * Should not call netif_carrier_on() since it will be called
1649          * by the link state check when the link is up
1650          */
1651 }
1652
1653 static void bnx2x_update_min_max(struct bnx2x *bp)
1654 {
1655         int port = BP_PORT(bp);
1656         int vn, i;
1657
1658         /* Init rate shaping and fairness contexts */
1659         bnx2x_init_port_minmax(bp);
1660
1661         bnx2x_calc_vn_weight_sum(bp);
1662
1663         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1664                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1665
1666         if (bp->port.pmf) {
1667                 int func;
1668
1669                 /* Set the attention towards other drivers on the same port */
1670                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1671                         if (vn == BP_E1HVN(bp))
1672                                 continue;
1673
1674                         func = ((vn << 1) | port);
1675                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1676                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1677                 }
1678
1679                 /* Store it to internal memory */
1680                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1681                         REG_WR(bp, BAR_XSTRORM_INTMEM +
1682                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1683                                ((u32 *)(&bp->cmng))[i]);
1684         }
1685 }
1686
1687 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1688 {
1689         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1690
1691         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1692
1693                 /*
1694                  * This is the only place besides the function initialization
1695                  * where the bp->flags can change so it is done without any
1696                  * locks
1697                  */
1698                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1699                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1700                         bp->flags |= MF_FUNC_DIS;
1701
1702                         bnx2x_e1h_disable(bp);
1703                 } else {
1704                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1705                         bp->flags &= ~MF_FUNC_DIS;
1706
1707                         bnx2x_e1h_enable(bp);
1708                 }
1709                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1710         }
1711         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1712
1713                 bnx2x_update_min_max(bp);
1714                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1715         }
1716
1717         /* Report results to MCP */
1718         if (dcc_event)
1719                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1720         else
1721                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1722 }
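
/* Note on the DCC handshake above: every recognized event bit is
 * cleared from dcc_event once handled, so any bits still set at the end
 * are unknown events and the MCP is answered with DCC_FAILURE; a fully
 * handled mask is acknowledged with DCC_OK.
 */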
1723
1724 /* must be called under the spq lock */
1725 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1726 {
1727         struct eth_spe *next_spe = bp->spq_prod_bd;
1728
1729         if (bp->spq_prod_bd == bp->spq_last_bd) {
1730                 bp->spq_prod_bd = bp->spq;
1731                 bp->spq_prod_idx = 0;
1732                 DP(NETIF_MSG_TIMER, "end of spq\n");
1733         } else {
1734                 bp->spq_prod_bd++;
1735                 bp->spq_prod_idx++;
1736         }
1737         return next_spe;
1738 }
1739
1740 /* must be called under the spq lock */
1741 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1742 {
1743         int func = BP_FUNC(bp);
1744
1745         /* Make sure that BD data is updated before writing the producer */
1746         wmb();
1747
1748         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1749                bp->spq_prod_idx);
1750         mmiowb();
1751 }
1752
1753 /* the slow path queue is odd since completions arrive on the fastpath ring */
1754 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1755                          u32 data_hi, u32 data_lo, int common)
1756 {
1757         struct eth_spe *spe;
1758
1759 #ifdef BNX2X_STOP_ON_ERROR
1760         if (unlikely(bp->panic))
1761                 return -EIO;
1762 #endif
1763
1764         spin_lock_bh(&bp->spq_lock);
1765
1766         if (!bp->spq_left) {
1767                 BNX2X_ERR("BUG! SPQ ring full!\n");
1768                 spin_unlock_bh(&bp->spq_lock);
1769                 bnx2x_panic();
1770                 return -EBUSY;
1771         }
1772
1773         spe = bnx2x_sp_get_next(bp);
1774
1775         /* CID needs the port number to be encoded in it */
1776         spe->hdr.conn_and_cmd_data =
1777                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1778                                     HW_CID(bp, cid));
1779         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1780         if (common)
1781                 spe->hdr.type |=
1782                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1783
1784         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1785         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1786
1787         bp->spq_left--;
1788
1789         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1790            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
1791            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1792            (u32)(U64_LO(bp->spq_mapping) +
1793            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1794            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1795
1796         bnx2x_sp_prod_update(bp);
1797         spin_unlock_bh(&bp->spq_lock);
1798         return 0;
1799 }
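
/* Illustrative usage sketch (assumed values): posting a "common" ramrod
 * on connection 0 could look like
 *
 *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, 0, 0, 0, 1);
 *
 * The completion arrives later on the fastpath ring (see the comment
 * above bnx2x_sp_post), which is why only spq_left is consumed here.
 */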
1800
1801 /* acquire split MCP access lock register */
1802 static int bnx2x_acquire_alr(struct bnx2x *bp)
1803 {
1804         u32 j, val;
1805         int rc = 0;
1806
1807         might_sleep();
1808         for (j = 0; j < 1000; j++) {
1809                 val = (1UL << 31);
1810                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1811                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1812                 if (val & (1L << 31))
1813                         break;
1814
1815                 msleep(5);
1816         }
1817         if (!(val & (1L << 31))) {
1818                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1819                 rc = -EBUSY;
1820         }
1821
1822         return rc;
1823 }
1824
1825 /* release split MCP access lock register */
1826 static void bnx2x_release_alr(struct bnx2x *bp)
1827 {
1828         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1829 }
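
/* Note on the ALR protocol above: bit 31 of the GRC register at
 * GRCBASE_MCP + 0x9c acts as a hardware test-and-set bit - writing 1
 * requests the lock, reading 1 back confirms ownership and writing 0
 * releases it. The 1000 x 5ms loop bounds the wait at about 5 seconds.
 */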
1830
1831 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1832 {
1833         struct host_def_status_block *def_sb = bp->def_status_blk;
1834         u16 rc = 0;
1835
1836         barrier(); /* status block is written to by the chip */
1837         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1838                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1839                 rc |= 1;
1840         }
1841         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1842                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1843                 rc |= 2;
1844         }
1845         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1846                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1847                 rc |= 4;
1848         }
1849         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1850                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1851                 rc |= 8;
1852         }
1853         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1854                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1855                 rc |= 16;
1856         }
1857         return rc;
1858 }
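
/* The value returned above is a bitmask of which default status block
 * indices changed: bit 0 - attention bits, bit 1 - CSTORM, bit 2 -
 * USTORM, bit 3 - XSTORM, bit 4 - TSTORM. bnx2x_sp_task() below tests
 * bits 0 (HW attentions) and 1 (CStorm events).
 */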
1859
1860 /*
1861  * slow path service functions
1862  */
1863
1864 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1865 {
1866         int port = BP_PORT(bp);
1867         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1868                        COMMAND_REG_ATTN_BITS_SET);
1869         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1870                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
1871         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1872                                        NIG_REG_MASK_INTERRUPT_PORT0;
1873         u32 aeu_mask;
1874         u32 nig_mask = 0;
1875
1876         if (bp->attn_state & asserted)
1877                 BNX2X_ERR("IGU ERROR\n");
1878
1879         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1880         aeu_mask = REG_RD(bp, aeu_addr);
1881
1882         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
1883            aeu_mask, asserted);
1884         aeu_mask &= ~(asserted & 0x3ff);
1885         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1886
1887         REG_WR(bp, aeu_addr, aeu_mask);
1888         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1889
1890         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1891         bp->attn_state |= asserted;
1892         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1893
1894         if (asserted & ATTN_HARD_WIRED_MASK) {
1895                 if (asserted & ATTN_NIG_FOR_FUNC) {
1896
1897                         bnx2x_acquire_phy_lock(bp);
1898
1899                         /* save nig interrupt mask */
1900                         nig_mask = REG_RD(bp, nig_int_mask_addr);
1901                         REG_WR(bp, nig_int_mask_addr, 0);
1902
1903                         bnx2x_link_attn(bp);
1904
1905                         /* handle unicore attn? */
1906                 }
1907                 if (asserted & ATTN_SW_TIMER_4_FUNC)
1908                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1909
1910                 if (asserted & GPIO_2_FUNC)
1911                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1912
1913                 if (asserted & GPIO_3_FUNC)
1914                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1915
1916                 if (asserted & GPIO_4_FUNC)
1917                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1918
1919                 if (port == 0) {
1920                         if (asserted & ATTN_GENERAL_ATTN_1) {
1921                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1922                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1923                         }
1924                         if (asserted & ATTN_GENERAL_ATTN_2) {
1925                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1926                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1927                         }
1928                         if (asserted & ATTN_GENERAL_ATTN_3) {
1929                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1930                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1931                         }
1932                 } else {
1933                         if (asserted & ATTN_GENERAL_ATTN_4) {
1934                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1935                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1936                         }
1937                         if (asserted & ATTN_GENERAL_ATTN_5) {
1938                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1939                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1940                         }
1941                         if (asserted & ATTN_GENERAL_ATTN_6) {
1942                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1943                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1944                         }
1945                 }
1946
1947         } /* if hardwired */
1948
1949         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1950            asserted, hc_addr);
1951         REG_WR(bp, hc_addr, asserted);
1952
1953         /* now set back the mask */
1954         if (asserted & ATTN_NIG_FOR_FUNC) {
1955                 REG_WR(bp, nig_int_mask_addr, nig_mask);
1956                 bnx2x_release_phy_lock(bp);
1957         }
1958 }
1959
1960 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1961 {
1962         int port = BP_PORT(bp);
1963         u32 ext_phy_config;
1964         /* mark the failure */
1965         ext_phy_config =
1966                 SHMEM_RD(bp,
1967                          dev_info.port_hw_config[port].external_phy_config);
1968
1969         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1970         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1971         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1972                  ext_phy_config);
1973
1974         /* log the failure */
1975         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1976                " the driver to shut down the card to prevent permanent"
1977                " damage.  Please contact OEM Support for assistance\n");
1978 }
1979
1980 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1981 {
1982         int port = BP_PORT(bp);
1983         int reg_offset;
1984         u32 val, swap_val, swap_override;
1985
1986         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1987                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1988
1989         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1990
1991                 val = REG_RD(bp, reg_offset);
1992                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1993                 REG_WR(bp, reg_offset, val);
1994
1995                 BNX2X_ERR("SPIO5 hw attention\n");
1996
1997                 /* Fan failure attention */
1998                 switch (bp->link_params.phy[EXT_PHY1].type) {
1999                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2000                         /* Low power mode is controlled by GPIO 2 */
2001                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2002                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2003                         /* The PHY reset is controlled by GPIO 1 */
2004                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2005                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2006                         break;
2007
2008                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2009                         /* The PHY reset is controlled by GPIO 1 */
2010                         /* fake the port number to cancel the swap done in
2011                            set_gpio() */
2012                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2013                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2014                         port = (swap_val && swap_override) ^ 1;
2015                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2016                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2017                         break;
2018
2019                 default:
2020                         break;
2021                 }
2022                 bnx2x_fan_failure(bp);
2023         }
2024
2025         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2026                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2027                 bnx2x_acquire_phy_lock(bp);
2028                 bnx2x_handle_module_detect_int(&bp->link_params);
2029                 bnx2x_release_phy_lock(bp);
2030         }
2031
2032         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2033
2034                 val = REG_RD(bp, reg_offset);
2035                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2036                 REG_WR(bp, reg_offset, val);
2037
2038                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2039                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2040                 bnx2x_panic();
2041         }
2042 }
2043
2044 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2045 {
2046         u32 val;
2047
2048         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2049
2050                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2051                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2052                 /* DORQ discard attention */
2053                 if (val & 0x2)
2054                         BNX2X_ERR("FATAL error from DORQ\n");
2055         }
2056
2057         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2058
2059                 int port = BP_PORT(bp);
2060                 int reg_offset;
2061
2062                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2063                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2064
2065                 val = REG_RD(bp, reg_offset);
2066                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2067                 REG_WR(bp, reg_offset, val);
2068
2069                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2070                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2071                 bnx2x_panic();
2072         }
2073 }
2074
2075 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2076 {
2077         u32 val;
2078
2079         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2080
2081                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2082                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2083                 /* CFC error attention */
2084                 if (val & 0x2)
2085                         BNX2X_ERR("FATAL error from CFC\n");
2086         }
2087
2088         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2089
2090                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2091                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2092                 /* RQ_USDMDP_FIFO_OVERFLOW */
2093                 if (val & 0x18000)
2094                         BNX2X_ERR("FATAL error from PXP\n");
2095         }
2096
2097         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2098
2099                 int port = BP_PORT(bp);
2100                 int reg_offset;
2101
2102                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2103                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2104
2105                 val = REG_RD(bp, reg_offset);
2106                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2107                 REG_WR(bp, reg_offset, val);
2108
2109                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2110                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2111                 bnx2x_panic();
2112         }
2113 }
2114
2115 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2116 {
2117         u32 val;
2118
2119         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2120
2121                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2122                         int func = BP_FUNC(bp);
2123
2124                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2125                         bp->mf_config = SHMEM_RD(bp,
2126                                            mf_cfg.func_mf_config[func].config);
2127                         val = SHMEM_RD(bp, func_mb[func].drv_status);
2128                         if (val & DRV_STATUS_DCC_EVENT_MASK)
2129                                 bnx2x_dcc_event(bp,
2130                                             (val & DRV_STATUS_DCC_EVENT_MASK));
2131                         bnx2x__link_status_update(bp);
2132                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2133                                 bnx2x_pmf_update(bp);
2134
2135                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2136
2137                         BNX2X_ERR("MC assert!\n");
2138                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2139                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2140                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2141                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2142                         bnx2x_panic();
2143
2144                 } else if (attn & BNX2X_MCP_ASSERT) {
2145
2146                         BNX2X_ERR("MCP assert!\n");
2147                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2148                         bnx2x_fw_dump(bp);
2149
2150                 } else
2151                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2152         }
2153
2154         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2155                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2156                 if (attn & BNX2X_GRC_TIMEOUT) {
2157                         val = CHIP_IS_E1H(bp) ?
2158                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2159                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2160                 }
2161                 if (attn & BNX2X_GRC_RSV) {
2162                         val = CHIP_IS_E1H(bp) ?
2163                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2164                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2165                 }
2166                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2167         }
2168 }
2169
2170 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
2171 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
2172 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2173 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
2174 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
2175 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
2176 /*
2177  * should be run under rtnl lock
2178  */
2179 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2180 {
2181         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2182         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2183         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2184         barrier();
2185         mmiowb();
2186 }
2187
2188 /*
2189  * should be run under rtnl lock
2190  */
2191 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2192 {
2193         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2194         val |= (1 << RESET_DONE_FLAG_SHIFT);
2195         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2196         barrier();
2197         mmiowb();
2198 }
2199
2200 /*
2201  * should be run under rtnl lock
2202  */
2203 bool bnx2x_reset_is_done(struct bnx2x *bp)
2204 {
2205         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2206         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2207         return (val & RESET_DONE_FLAG_MASK) ? false : true;
2208 }
2209
2210 /*
2211  * should be run under rtnl lock
2212  */
2213 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2214 {
2215         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2216
2217         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2218
2219         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2220         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2221         barrier();
2222         mmiowb();
2223 }
2224
2225 /*
2226  * should be run under rtnl lock
2227  */
2228 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2229 {
2230         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2231
2232         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2233
2234         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2235         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2236         barrier();
2237         mmiowb();
2238
2239         return val1;
2240 }
2241
2242 /*
2243  * should be run under rtnl lock
2244  */
2245 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2246 {
2247         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2248 }
2249
2250 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2251 {
2252         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2253         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2254 }
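
/* Layout of BNX2X_MISC_GEN_REG as used by the helpers above, derived
 * from the macros: bits 15:0 hold the driver load counter and bit 16
 * (RESET_DONE_FLAG_SHIFT) is the reset-in-progress flag. For example, a
 * raw value of 0x00010002 reads as "reset in progress, two driver
 * instances loaded".
 */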
2255
2256 static inline void _print_next_block(int idx, const char *blk)
2257 {
2258         if (idx)
2259                 pr_cont(", ");
2260         pr_cont("%s", blk);
2261 }
2262
2263 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2264 {
2265         int i = 0;
2266         u32 cur_bit = 0;
2267         for (i = 0; sig; i++) {
2268                 cur_bit = ((u32)0x1 << i);
2269                 if (sig & cur_bit) {
2270                         switch (cur_bit) {
2271                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2272                                 _print_next_block(par_num++, "BRB");
2273                                 break;
2274                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2275                                 _print_next_block(par_num++, "PARSER");
2276                                 break;
2277                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2278                                 _print_next_block(par_num++, "TSDM");
2279                                 break;
2280                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2281                                 _print_next_block(par_num++, "SEARCHER");
2282                                 break;
2283                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2284                                 _print_next_block(par_num++, "TSEMI");
2285                                 break;
2286                         }
2287
2288                         /* Clear the bit */
2289                         sig &= ~cur_bit;
2290                 }
2291         }
2292
2293         return par_num;
2294 }
2295
2296 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2297 {
2298         int i = 0;
2299         u32 cur_bit = 0;
2300         for (i = 0; sig; i++) {
2301                 cur_bit = ((u32)0x1 << i);
2302                 if (sig & cur_bit) {
2303                         switch (cur_bit) {
2304                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2305                                 _print_next_block(par_num++, "PBCLIENT");
2306                                 break;
2307                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2308                                 _print_next_block(par_num++, "QM");
2309                                 break;
2310                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2311                                 _print_next_block(par_num++, "XSDM");
2312                                 break;
2313                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2314                                 _print_next_block(par_num++, "XSEMI");
2315                                 break;
2316                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2317                                 _print_next_block(par_num++, "DOORBELLQ");
2318                                 break;
2319                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2320                                 _print_next_block(par_num++, "VAUX PCI CORE");
2321                                 break;
2322                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2323                                 _print_next_block(par_num++, "DEBUG");
2324                                 break;
2325                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2326                                 _print_next_block(par_num++, "USDM");
2327                                 break;
2328                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2329                                 _print_next_block(par_num++, "USEMI");
2330                                 break;
2331                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2332                                 _print_next_block(par_num++, "UPB");
2333                                 break;
2334                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2335                                 _print_next_block(par_num++, "CSDM");
2336                                 break;
2337                         }
2338
2339                         /* Clear the bit */
2340                         sig &= ~cur_bit;
2341                 }
2342         }
2343
2344         return par_num;
2345 }
2346
2347 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2348 {
2349         int i = 0;
2350         u32 cur_bit = 0;
2351         for (i = 0; sig; i++) {
2352                 cur_bit = ((u32)0x1 << i);
2353                 if (sig & cur_bit) {
2354                         switch (cur_bit) {
2355                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2356                                 _print_next_block(par_num++, "CSEMI");
2357                                 break;
2358                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2359                                 _print_next_block(par_num++, "PXP");
2360                                 break;
2361                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2362                                 _print_next_block(par_num++,
2363                                         "PXPPCICLOCKCLIENT");
2364                                 break;
2365                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2366                                 _print_next_block(par_num++, "CFC");
2367                                 break;
2368                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2369                                 _print_next_block(par_num++, "CDU");
2370                                 break;
2371                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2372                                 _print_next_block(par_num++, "IGU");
2373                                 break;
2374                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2375                                 _print_next_block(par_num++, "MISC");
2376                                 break;
2377                         }
2378
2379                         /* Clear the bit */
2380                         sig &= ~cur_bit;
2381                 }
2382         }
2383
2384         return par_num;
2385 }
2386
2387 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2388 {
2389         int i = 0;
2390         u32 cur_bit = 0;
2391         for (i = 0; sig; i++) {
2392                 cur_bit = ((u32)0x1 << i);
2393                 if (sig & cur_bit) {
2394                         switch (cur_bit) {
2395                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2396                                 _print_next_block(par_num++, "MCP ROM");
2397                                 break;
2398                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2399                                 _print_next_block(par_num++, "MCP UMP RX");
2400                                 break;
2401                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2402                                 _print_next_block(par_num++, "MCP UMP TX");
2403                                 break;
2404                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2405                                 _print_next_block(par_num++, "MCP SCPAD");
2406                                 break;
2407                         }
2408
2409                         /* Clear the bit */
2410                         sig &= ~cur_bit;
2411                 }
2412         }
2413
2414         return par_num;
2415 }
2416
2417 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2418                                      u32 sig2, u32 sig3)
2419 {
2420         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2421             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2422                 int par_num = 0;
2423                 DP(NETIF_MSG_HW, "Parity error detected: HW block parity attention: "
2424                         "[0]:0x%08x [1]:0x%08x "
2425                         "[2]:0x%08x [3]:0x%08x\n",
2426                           sig0 & HW_PRTY_ASSERT_SET_0,
2427                           sig1 & HW_PRTY_ASSERT_SET_1,
2428                           sig2 & HW_PRTY_ASSERT_SET_2,
2429                           sig3 & HW_PRTY_ASSERT_SET_3);
2430                 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2431                        bp->dev->name);
2432                 par_num = bnx2x_print_blocks_with_parity0(
2433                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2434                 par_num = bnx2x_print_blocks_with_parity1(
2435                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2436                 par_num = bnx2x_print_blocks_with_parity2(
2437                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2438                 par_num = bnx2x_print_blocks_with_parity3(
2439                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2440                 pr_cont("\n");
2441                 return true;
2442         } else
2443                 return false;
2444 }
2445
2446 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2447 {
2448         struct attn_route attn;
2449         int port = BP_PORT(bp);
2450
2451         attn.sig[0] = REG_RD(bp,
2452                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2453                              port*4);
2454         attn.sig[1] = REG_RD(bp,
2455                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2456                              port*4);
2457         attn.sig[2] = REG_RD(bp,
2458                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2459                              port*4);
2460         attn.sig[3] = REG_RD(bp,
2461                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2462                              port*4);
2463
2464         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2465                                         attn.sig[3]);
2466 }
2467
2468 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2469 {
2470         struct attn_route attn, *group_mask;
2471         int port = BP_PORT(bp);
2472         int index;
2473         u32 reg_addr;
2474         u32 val;
2475         u32 aeu_mask;
2476
2477         /* need to take HW lock because MCP or other port might also
2478            try to handle this event */
2479         bnx2x_acquire_alr(bp);
2480
2481         if (bnx2x_chk_parity_attn(bp)) {
2482                 bp->recovery_state = BNX2X_RECOVERY_INIT;
2483                 bnx2x_set_reset_in_progress(bp);
2484                 schedule_delayed_work(&bp->reset_task, 0);
2485                 /* Disable HW interrupts */
2486                 bnx2x_int_disable(bp);
2487                 bnx2x_release_alr(bp);
2488                 /* In case of parity errors don't handle attentions so
2489                  * that the other function can also "see" the parity errors.
2490                  */
2491                 return;
2492         }
2493
2494         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2495         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2496         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2497         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2498         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2499            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2500
2501         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2502                 if (deasserted & (1 << index)) {
2503                         group_mask = &bp->attn_group[index];
2504
2505                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2506                            index, group_mask->sig[0], group_mask->sig[1],
2507                            group_mask->sig[2], group_mask->sig[3]);
2508
2509                         bnx2x_attn_int_deasserted3(bp,
2510                                         attn.sig[3] & group_mask->sig[3]);
2511                         bnx2x_attn_int_deasserted1(bp,
2512                                         attn.sig[1] & group_mask->sig[1]);
2513                         bnx2x_attn_int_deasserted2(bp,
2514                                         attn.sig[2] & group_mask->sig[2]);
2515                         bnx2x_attn_int_deasserted0(bp,
2516                                         attn.sig[0] & group_mask->sig[0]);
2517                 }
2518         }
2519
2520         bnx2x_release_alr(bp);
2521
2522         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2523
2524         val = ~deasserted;
2525         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2526            val, reg_addr);
2527         REG_WR(bp, reg_addr, val);
2528
2529         if (~bp->attn_state & deasserted)
2530                 BNX2X_ERR("IGU ERROR\n");
2531
2532         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2533                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2534
2535         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2536         aeu_mask = REG_RD(bp, reg_addr);
2537
2538         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2539            aeu_mask, deasserted);
2540         aeu_mask |= (deasserted & 0x3ff);
2541         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2542
2543         REG_WR(bp, reg_addr, aeu_mask);
2544         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2545
2546         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2547         bp->attn_state &= ~deasserted;
2548         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2549 }
2550
2551 static void bnx2x_attn_int(struct bnx2x *bp)
2552 {
2553         /* read local copy of bits */
2554         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2555                                                                 attn_bits);
2556         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2557                                                                 attn_bits_ack);
2558         u32 attn_state = bp->attn_state;
2559
2560         /* look for changed bits */
2561         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2562         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2563
2564         DP(NETIF_MSG_HW,
2565            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2566            attn_bits, attn_ack, asserted, deasserted);
2567
2568         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2569                 BNX2X_ERR("BAD attention state\n");
2570
2571         /* handle bits that were raised */
2572         if (asserted)
2573                 bnx2x_attn_int_asserted(bp, asserted);
2574
2575         if (deasserted)
2576                 bnx2x_attn_int_deasserted(bp, deasserted);
2577 }
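
/* Worked example for the edge detection above (assumed values): with
 * attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1,
 *
 *      asserted   =  0x5 & ~0x1 & ~0x1 = 0x4   (bit 2 newly raised)
 *      deasserted = ~0x5 &  0x1 &  0x1 = 0x0   (nothing lowered)
 *
 * so only bnx2x_attn_int_asserted() runs, for bit 2 alone.
 */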
2578
2579 static void bnx2x_sp_task(struct work_struct *work)
2580 {
2581         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2582         u16 status;
2583
2584         /* Return here if interrupt is disabled */
2585         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2586                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2587                 return;
2588         }
2589
2590         status = bnx2x_update_dsb_idx(bp);
2591 /*      if (status == 0)                                     */
2592 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2593
2594         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2595
2596         /* HW attentions */
2597         if (status & 0x1) {
2598                 bnx2x_attn_int(bp);
2599                 status &= ~0x1;
2600         }
2601
2602         /* CStorm events: STAT_QUERY */
2603         if (status & 0x2) {
2604                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2605                 status &= ~0x2;
2606         }
2607
2608         if (unlikely(status))
2609                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2610                    status);
2611
2612         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2613                      IGU_INT_NOP, 1);
2614         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2615                      IGU_INT_NOP, 1);
2616         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2617                      IGU_INT_NOP, 1);
2618         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2619                      IGU_INT_NOP, 1);
2620         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2621                      IGU_INT_ENABLE, 1);
2622 }
2623
2624 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2625 {
2626         struct net_device *dev = dev_instance;
2627         struct bnx2x *bp = netdev_priv(dev);
2628
2629         /* Return here if interrupt is disabled */
2630         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2631                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2632                 return IRQ_HANDLED;
2633         }
2634
2635         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2636
2637 #ifdef BNX2X_STOP_ON_ERROR
2638         if (unlikely(bp->panic))
2639                 return IRQ_HANDLED;
2640 #endif
2641
2642 #ifdef BCM_CNIC
2643         {
2644                 struct cnic_ops *c_ops;
2645
2646                 rcu_read_lock();
2647                 c_ops = rcu_dereference(bp->cnic_ops);
2648                 if (c_ops)
2649                         c_ops->cnic_handler(bp->cnic_data, NULL);
2650                 rcu_read_unlock();
2651         }
2652 #endif
2653         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2654
2655         return IRQ_HANDLED;
2656 }
2657
2658 /* end of slow path */
2659
2660 static void bnx2x_timer(unsigned long data)
2661 {
2662         struct bnx2x *bp = (struct bnx2x *) data;
2663
2664         if (!netif_running(bp->dev))
2665                 return;
2666
2667         if (atomic_read(&bp->intr_sem) != 0)
2668                 goto timer_restart;
2669
2670         if (poll) {
2671                 struct bnx2x_fastpath *fp = &bp->fp[0];
2672                 int rc;
2673
2674                 bnx2x_tx_int(fp);
2675                 rc = bnx2x_rx_int(fp, 1000);
2676         }
2677
2678         if (!BP_NOMCP(bp)) {
2679                 int func = BP_FUNC(bp);
2680                 u32 drv_pulse;
2681                 u32 mcp_pulse;
2682
2683                 ++bp->fw_drv_pulse_wr_seq;
2684                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2685                 /* TBD - add SYSTEM_TIME */
2686                 drv_pulse = bp->fw_drv_pulse_wr_seq;
2687                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2688
2689                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2690                              MCP_PULSE_SEQ_MASK);
2691                 /* The delta between driver pulse and mcp response
2692                  * should be 1 (before mcp response) or 0 (after mcp response)
2693                  */
2694                 if ((drv_pulse != mcp_pulse) &&
2695                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2696                         /* someone lost a heartbeat... */
2697                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2698                                   drv_pulse, mcp_pulse);
2699                 }
2700         }
2701
2702         if (bp->state == BNX2X_STATE_OPEN)
2703                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2704
2705 timer_restart:
2706         mod_timer(&bp->timer, jiffies + bp->current_interval);
2707 }
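
/* Heartbeat example for the pulse check above (assumed values): if the
 * driver writes drv_pulse 0x12, then reading mcp_pulse 0x12 (MCP already
 * responded) or 0x11 (MCP one step behind) passes the check; any other
 * delta triggers the "lost a heartbeat" error.
 */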
2708
2709 /* end of Statistics */
2710
2711 /* nic init */
2712
2713 /*
2714  * nic init service functions
2715  */
2716
2717 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2718 {
2719         int port = BP_PORT(bp);
2720
2721         /* "CSTORM" */
2722         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2723                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2724                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2725         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2726                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2727                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2728 }
2729
2730 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2731                           dma_addr_t mapping, int sb_id)
2732 {
2733         int port = BP_PORT(bp);
2734         int func = BP_FUNC(bp);
2735         int index;
2736         u64 section;
2737
2738         /* USTORM */
2739         section = ((u64)mapping) + offsetof(struct host_status_block,
2740                                             u_status_block);
2741         sb->u_status_block.status_block_id = sb_id;
2742
2743         REG_WR(bp, BAR_CSTRORM_INTMEM +
2744                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2745         REG_WR(bp, BAR_CSTRORM_INTMEM +
2746                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2747                U64_HI(section));
2748         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2749                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2750
2751         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2752                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2753                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2754
2755         /* CSTORM */
2756         section = ((u64)mapping) + offsetof(struct host_status_block,
2757                                             c_status_block);
2758         sb->c_status_block.status_block_id = sb_id;
2759
2760         REG_WR(bp, BAR_CSTRORM_INTMEM +
2761                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2762         REG_WR(bp, BAR_CSTRORM_INTMEM +
2763                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2764                U64_HI(section));
2765         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2766                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2767
2768         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2769                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2770                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2771
2772         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2773 }
2774
2775 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2776 {
2777         int func = BP_FUNC(bp);
2778
2779         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2780                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2781                         sizeof(struct tstorm_def_status_block)/4);
2782         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2783                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2784                         sizeof(struct cstorm_def_status_block_u)/4);
2785         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2786                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2787                         sizeof(struct cstorm_def_status_block_c)/4);
2788         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2789                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2790                         sizeof(struct xstorm_def_status_block)/4);
2791 }
2792
2793 static void bnx2x_init_def_sb(struct bnx2x *bp,
2794                               struct host_def_status_block *def_sb,
2795                               dma_addr_t mapping, int sb_id)
2796 {
2797         int port = BP_PORT(bp);
2798         int func = BP_FUNC(bp);
2799         int index, val, reg_offset;
2800         u64 section;
2801
2802         /* ATTN */
2803         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2804                                             atten_status_block);
2805         def_sb->atten_status_block.status_block_id = sb_id;
2806
2807         bp->attn_state = 0;
2808
2809         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2810                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2811
2812         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2813                 bp->attn_group[index].sig[0] = REG_RD(bp,
2814                                                      reg_offset + 0x10*index);
2815                 bp->attn_group[index].sig[1] = REG_RD(bp,
2816                                                reg_offset + 0x4 + 0x10*index);
2817                 bp->attn_group[index].sig[2] = REG_RD(bp,
2818                                                reg_offset + 0x8 + 0x10*index);
2819                 bp->attn_group[index].sig[3] = REG_RD(bp,
2820                                                reg_offset + 0xc + 0x10*index);
2821         }
2822
2823         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2824                              HC_REG_ATTN_MSG0_ADDR_L);
2825
2826         REG_WR(bp, reg_offset, U64_LO(section));
2827         REG_WR(bp, reg_offset + 4, U64_HI(section));
2828
2829         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2830
2831         val = REG_RD(bp, reg_offset);
2832         val |= sb_id;
2833         REG_WR(bp, reg_offset, val);
2834
2835         /* USTORM */
2836         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2837                                             u_def_status_block);
2838         def_sb->u_def_status_block.status_block_id = sb_id;
2839
2840         REG_WR(bp, BAR_CSTRORM_INTMEM +
2841                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2842         REG_WR(bp, BAR_CSTRORM_INTMEM +
2843                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2844                U64_HI(section));
2845         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2846                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2847
2848         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2849                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2850                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2851
2852         /* CSTORM */
2853         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2854                                             c_def_status_block);
2855         def_sb->c_def_status_block.status_block_id = sb_id;
2856
2857         REG_WR(bp, BAR_CSTRORM_INTMEM +
2858                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2859         REG_WR(bp, BAR_CSTRORM_INTMEM +
2860                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2861                U64_HI(section));
2862         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2863                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2864
2865         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2866                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2867                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2868
2869         /* TSTORM */
2870         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2871                                             t_def_status_block);
2872         def_sb->t_def_status_block.status_block_id = sb_id;
2873
2874         REG_WR(bp, BAR_TSTRORM_INTMEM +
2875                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2876         REG_WR(bp, BAR_TSTRORM_INTMEM +
2877                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2878                U64_HI(section));
2879         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2880                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2881
2882         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2883                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2884                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2885
2886         /* XSTORM */
2887         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2888                                             x_def_status_block);
2889         def_sb->x_def_status_block.status_block_id = sb_id;
2890
2891         REG_WR(bp, BAR_XSTRORM_INTMEM +
2892                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2893         REG_WR(bp, BAR_XSTRORM_INTMEM +
2894                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2895                U64_HI(section));
2896         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2897                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2898
2899         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2900                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2901                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2902
2903         bp->stats_pending = 0;
2904         bp->set_mac_pending = 0;
2905
2906         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2907 }
2908
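/* Program the per-queue interrupt coalescing timeouts: rx_ticks/tx_ticks
 * are scaled down by 4*BNX2X_BTR into HC timeout units, and a resulting
 * value of 0 also sets the HC_DISABLE flag for that status block index.
 */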
2909 void bnx2x_update_coalesce(struct bnx2x *bp)
2910 {
2911         int port = BP_PORT(bp);
2912         int i;
2913
2914         for_each_queue(bp, i) {
2915                 int sb_id = bp->fp[i].sb_id;
2916
2917                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2918                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2919                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2920                                                       U_SB_ETH_RX_CQ_INDEX),
2921                         bp->rx_ticks/(4 * BNX2X_BTR));
2922                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2923                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2924                                                        U_SB_ETH_RX_CQ_INDEX),
2925                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2926
2927                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2928                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2929                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2930                                                       C_SB_ETH_TX_CQ_INDEX),
2931                         bp->tx_ticks/(4 * BNX2X_BTR));
2932                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2933                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2934                                                        C_SB_ETH_TX_CQ_INDEX),
2935                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2936         }
2937 }
2938
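/* Initialize the slow-path (SP) queue: lock, credit count and producer
 * index, then publish the SPQ page base and initial producer to XSTORM.
 */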
2939 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2940 {
2941         int func = BP_FUNC(bp);
2942
2943         spin_lock_init(&bp->spq_lock);
2944
2945         bp->spq_left = MAX_SPQ_PENDING;
2946         bp->spq_prod_idx = 0;
2947         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2948         bp->spq_prod_bd = bp->spq;
2949         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2950
2951         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2952                U64_LO(bp->spq_mapping));
2953         REG_WR(bp,
2954                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2955                U64_HI(bp->spq_mapping));
2956
2957         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2958                bp->spq_prod_idx);
2959 }
2960
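/* Fill the per-connection Ethernet context. The USTORM part describes the
 * Rx side (client id, status block, BD/SGE page bases, optional TPA); the
 * CSTORM/XSTORM parts describe the Tx side (completion index, TX BD page
 * base, statistics).
 */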
2961 static void bnx2x_init_context(struct bnx2x *bp)
2962 {
2963         int i;
2964
2965         /* Rx */
2966         for_each_queue(bp, i) {
2967                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2968                 struct bnx2x_fastpath *fp = &bp->fp[i];
2969                 u8 cl_id = fp->cl_id;
2970
2971                 context->ustorm_st_context.common.sb_index_numbers =
2972                                                 BNX2X_RX_SB_INDEX_NUM;
2973                 context->ustorm_st_context.common.clientId = cl_id;
2974                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2975                 context->ustorm_st_context.common.flags =
2976                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2977                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2978                 context->ustorm_st_context.common.statistics_counter_id =
2979                                                 cl_id;
2980                 context->ustorm_st_context.common.mc_alignment_log_size =
2981                                                 BNX2X_RX_ALIGN_SHIFT;
2982                 context->ustorm_st_context.common.bd_buff_size =
2983                                                 bp->rx_buf_size;
2984                 context->ustorm_st_context.common.bd_page_base_hi =
2985                                                 U64_HI(fp->rx_desc_mapping);
2986                 context->ustorm_st_context.common.bd_page_base_lo =
2987                                                 U64_LO(fp->rx_desc_mapping);
2988                 if (!fp->disable_tpa) {
2989                         context->ustorm_st_context.common.flags |=
2990                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2991                         context->ustorm_st_context.common.sge_buff_size =
2992                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2993                                            0xffff);
2994                         context->ustorm_st_context.common.sge_page_base_hi =
2995                                                 U64_HI(fp->rx_sge_mapping);
2996                         context->ustorm_st_context.common.sge_page_base_lo =
2997                                                 U64_LO(fp->rx_sge_mapping);
2998
2999                         context->ustorm_st_context.common.max_sges_for_packet =
3000                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
3001                         context->ustorm_st_context.common.max_sges_for_packet =
3002                                 ((context->ustorm_st_context.common.
3003                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
3004                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3005                 }
3006
3007                 context->ustorm_ag_context.cdu_usage =
3008                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3009                                                CDU_REGION_NUMBER_UCM_AG,
3010                                                ETH_CONNECTION_TYPE);
3011
3012                 context->xstorm_ag_context.cdu_reserved =
3013                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3014                                                CDU_REGION_NUMBER_XCM_AG,
3015                                                ETH_CONNECTION_TYPE);
3016         }
3017
3018         /* Tx */
3019         for_each_queue(bp, i) {
3020                 struct bnx2x_fastpath *fp = &bp->fp[i];
3021                 struct eth_context *context =
3022                         bnx2x_sp(bp, context[i].eth);
3023
3024                 context->cstorm_st_context.sb_index_number =
3025                                                 C_SB_ETH_TX_CQ_INDEX;
3026                 context->cstorm_st_context.status_block_id = fp->sb_id;
3027
3028                 context->xstorm_st_context.tx_bd_page_base_hi =
3029                                                 U64_HI(fp->tx_desc_mapping);
3030                 context->xstorm_st_context.tx_bd_page_base_lo =
3031                                                 U64_LO(fp->tx_desc_mapping);
3032                 context->xstorm_st_context.statistics_data = (fp->cl_id |
3033                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3034         }
3035 }
3036
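/* Program the RSS indirection table as a simple round-robin over the
 * active clients: entry i maps to the leading client id plus
 * (i % num_queues), e.g. with 4 queues entry 5 maps to cl_id + 1.
 */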
3037 static void bnx2x_init_ind_table(struct bnx2x *bp)
3038 {
3039         int func = BP_FUNC(bp);
3040         int i;
3041
3042         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3043                 return;
3044
3045         DP(NETIF_MSG_IFUP,
3046            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
3047         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3048                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3049                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3050                         bp->fp->cl_id + (i % bp->num_queues));
3051 }
3052
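/* Build a tstorm_eth_client_config (MTU, statistics, E1H outer-VLAN
 * removal and, when enabled, HW VLAN stripping) and write it one 32-bit
 * word at a time for every client of this function.
 */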
3053 void bnx2x_set_client_config(struct bnx2x *bp)
3054 {
3055         struct tstorm_eth_client_config tstorm_client = {0};
3056         int port = BP_PORT(bp);
3057         int i;
3058
3059         tstorm_client.mtu = bp->dev->mtu;
3060         tstorm_client.config_flags =
3061                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3062                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3063 #ifdef BCM_VLAN
3064         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3065                 tstorm_client.config_flags |=
3066                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3067                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3068         }
3069 #endif
3070
3071         for_each_queue(bp, i) {
3072                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3073
3074                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3075                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3076                        ((u32 *)&tstorm_client)[0]);
3077                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3078                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3079                        ((u32 *)&tstorm_client)[1]);
3080         }
3081
3082         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3083            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3084 }
3085
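/* Translate bp->rx_mode into the TSTORM MAC filtering masks (drop/accept
 * all for unicast/multicast/broadcast) and the matching NIG LLH mask, then
 * push the client configuration unless Rx is disabled altogether.
 */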
3086 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3087 {
3088         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3089         int mode = bp->rx_mode;
3090         int mask = bp->rx_mode_cl_mask;
3091         int func = BP_FUNC(bp);
3092         int port = BP_PORT(bp);
3093         int i;
3094         /* All but management unicast packets should pass to the host as well */
3095         u32 llh_mask =
3096                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3097                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3098                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3099                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3100
3101         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
3102
3103         switch (mode) {
3104         case BNX2X_RX_MODE_NONE: /* no Rx */
3105                 tstorm_mac_filter.ucast_drop_all = mask;
3106                 tstorm_mac_filter.mcast_drop_all = mask;
3107                 tstorm_mac_filter.bcast_drop_all = mask;
3108                 break;
3109
3110         case BNX2X_RX_MODE_NORMAL:
3111                 tstorm_mac_filter.bcast_accept_all = mask;
3112                 break;
3113
3114         case BNX2X_RX_MODE_ALLMULTI:
3115                 tstorm_mac_filter.mcast_accept_all = mask;
3116                 tstorm_mac_filter.bcast_accept_all = mask;
3117                 break;
3118
3119         case BNX2X_RX_MODE_PROMISC:
3120                 tstorm_mac_filter.ucast_accept_all = mask;
3121                 tstorm_mac_filter.mcast_accept_all = mask;
3122                 tstorm_mac_filter.bcast_accept_all = mask;
3123                 /* pass management unicast packets as well */
3124                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3125                 break;
3126
3127         default:
3128                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3129                 break;
3130         }
3131
3132         REG_WR(bp,
3133                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3134                llh_mask);
3135
3136         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3137                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3138                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3139                        ((u32 *)&tstorm_mac_filter)[i]);
3140
3141 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3142                    ((u32 *)&tstorm_mac_filter)[i]); */
3143         }
3144
3145         if (mode != BNX2X_RX_MODE_NONE)
3146                 bnx2x_set_client_config(bp);
3147 }
3148
3149 static void bnx2x_init_internal_common(struct bnx2x *bp)
3150 {
3151         int i;
3152
3153         /* Zero this manually as its initialization is
3154            currently missing in the initTool */
3155         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3156                 REG_WR(bp, BAR_USTRORM_INTMEM +
3157                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
3158 }
3159
3160 static void bnx2x_init_internal_port(struct bnx2x *bp)
3161 {
3162         int port = BP_PORT(bp);
3163
3164         REG_WR(bp,
3165                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3166         REG_WR(bp,
3167                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3168         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3169         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3170 }
3171
3172 static void bnx2x_init_internal_func(struct bnx2x *bp)
3173 {
3174         struct tstorm_eth_function_common_config tstorm_config = {0};
3175         struct stats_indication_flags stats_flags = {0};
3176         int port = BP_PORT(bp);
3177         int func = BP_FUNC(bp);
3178         int i, j;
3179         u32 offset;
3180         u16 max_agg_size;
3181
3182         tstorm_config.config_flags = RSS_FLAGS(bp);
3183
3184         if (is_multi(bp))
3185                 tstorm_config.rss_result_mask = MULTI_MASK;
3186
3187         /* Enable TPA if needed */
3188         if (bp->flags & TPA_ENABLE_FLAG)
3189                 tstorm_config.config_flags |=
3190                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3191
3192         if (IS_E1HMF(bp))
3193                 tstorm_config.config_flags |=
3194                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3195
3196         tstorm_config.leading_client_id = BP_L_ID(bp);
3197
3198         REG_WR(bp, BAR_TSTRORM_INTMEM +
3199                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3200                (*(u32 *)&tstorm_config));
3201
3202         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3203         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3204         bnx2x_set_storm_rx_mode(bp);
3205
3206         for_each_queue(bp, i) {
3207                 u8 cl_id = bp->fp[i].cl_id;
3208
3209                 /* reset xstorm per client statistics */
3210                 offset = BAR_XSTRORM_INTMEM +
3211                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3212                 for (j = 0;
3213                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3214                         REG_WR(bp, offset + j*4, 0);
3215
3216                 /* reset tstorm per client statistics */
3217                 offset = BAR_TSTRORM_INTMEM +
3218                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3219                 for (j = 0;
3220                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3221                         REG_WR(bp, offset + j*4, 0);
3222
3223                 /* reset ustorm per client statistics */
3224                 offset = BAR_USTRORM_INTMEM +
3225                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3226                 for (j = 0;
3227                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3228                         REG_WR(bp, offset + j*4, 0);
3229         }
3230
3231         /* Init statistics related context */
3232         stats_flags.collect_eth = 1;
3233
3234         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3235                ((u32 *)&stats_flags)[0]);
3236         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3237                ((u32 *)&stats_flags)[1]);
3238
3239         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3240                ((u32 *)&stats_flags)[0]);
3241         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3242                ((u32 *)&stats_flags)[1]);
3243
3244         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3245                ((u32 *)&stats_flags)[0]);
3246         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3247                ((u32 *)&stats_flags)[1]);
3248
3249         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3250                ((u32 *)&stats_flags)[0]);
3251         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3252                ((u32 *)&stats_flags)[1]);
3253
3254         REG_WR(bp, BAR_XSTRORM_INTMEM +
3255                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3256                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3257         REG_WR(bp, BAR_XSTRORM_INTMEM +
3258                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3259                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3260
3261         REG_WR(bp, BAR_TSTRORM_INTMEM +
3262                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3263                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3264         REG_WR(bp, BAR_TSTRORM_INTMEM +
3265                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3266                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3267
3268         REG_WR(bp, BAR_USTRORM_INTMEM +
3269                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3270                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3271         REG_WR(bp, BAR_USTRORM_INTMEM +
3272                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3273                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3274
3275         if (CHIP_IS_E1H(bp)) {
3276                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3277                         IS_E1HMF(bp));
3278                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3279                         IS_E1HMF(bp));
3280                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3281                         IS_E1HMF(bp));
3282                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3283                         IS_E1HMF(bp));
3284
3285                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3286                          bp->e1hov);
3287         }
3288
3289         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
3290         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3291                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3292         for_each_queue(bp, i) {
3293                 struct bnx2x_fastpath *fp = &bp->fp[i];
3294
3295                 REG_WR(bp, BAR_USTRORM_INTMEM +
3296                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3297                        U64_LO(fp->rx_comp_mapping));
3298                 REG_WR(bp, BAR_USTRORM_INTMEM +
3299                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3300                        U64_HI(fp->rx_comp_mapping));
3301
3302                 /* Next page */
3303                 REG_WR(bp, BAR_USTRORM_INTMEM +
3304                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3305                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3306                 REG_WR(bp, BAR_USTRORM_INTMEM +
3307                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3308                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3309
3310                 REG_WR16(bp, BAR_USTRORM_INTMEM +
3311                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3312                          max_agg_size);
3313         }
3314
3315         /* dropless flow control */
3316         if (CHIP_IS_E1H(bp)) {
3317                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3318
3319                 rx_pause.bd_thr_low = 250;
3320                 rx_pause.cqe_thr_low = 250;
3321                 rx_pause.cos = 1;
3322                 rx_pause.sge_thr_low = 0;
3323                 rx_pause.bd_thr_high = 350;
3324                 rx_pause.cqe_thr_high = 350;
3325                 rx_pause.sge_thr_high = 0;
3326
3327                 for_each_queue(bp, i) {
3328                         struct bnx2x_fastpath *fp = &bp->fp[i];
3329
3330                         if (!fp->disable_tpa) {
3331                                 rx_pause.sge_thr_low = 150;
3332                                 rx_pause.sge_thr_high = 250;
3333                         }
3334
3336                         offset = BAR_USTRORM_INTMEM +
3337                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3338                                                                    fp->cl_id);
3339                         for (j = 0;
3340                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3341                              j++)
3342                                 REG_WR(bp, offset + j*4,
3343                                        ((u32 *)&rx_pause)[j]);
3344                 }
3345         }
3346
3347         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3348
3349         /* Init rate shaping and fairness contexts */
3350         if (IS_E1HMF(bp)) {
3351                 int vn;
3352
3353                 /* During init there is no active link;
3354                    until link is up, set link rate to 10Gbps */
3355                 bp->link_vars.line_speed = SPEED_10000;
3356                 bnx2x_init_port_minmax(bp);
3357
3358                 if (!BP_NOMCP(bp))
3359                         bp->mf_config =
3360                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3361                 bnx2x_calc_vn_weight_sum(bp);
3362
3363                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3364                         bnx2x_init_vn_minmax(bp, 2*vn + port);
3365
3366                 /* Enable rate shaping and fairness */
3367                 bp->cmng.flags.cmng_enables |=
3368                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3369
3370         } else {
3371                 /* rate shaping and fairness are disabled */
3372                 DP(NETIF_MSG_IFUP,
3373                    "single function mode  minmax will be disabled\n");
3374         }
3375
3377         /* Store cmng structures to internal memory */
3378         if (bp->port.pmf)
3379                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3380                         REG_WR(bp, BAR_XSTRORM_INTMEM +
3381                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3382                                ((u32 *)(&bp->cmng))[i]);
3383 }
3384
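/* Dispatch internal RAM init by load scope. The cases deliberately fall
 * through: a COMMON load also performs the PORT and FUNCTION stages, and a
 * PORT load also performs the FUNCTION stage.
 */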
3385 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3386 {
3387         switch (load_code) {
3388         case FW_MSG_CODE_DRV_LOAD_COMMON:
3389                 bnx2x_init_internal_common(bp);
3390                 /* no break */
3391
3392         case FW_MSG_CODE_DRV_LOAD_PORT:
3393                 bnx2x_init_internal_port(bp);
3394                 /* no break */
3395
3396         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3397                 bnx2x_init_internal_func(bp);
3398                 break;
3399
3400         default:
3401                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3402                 break;
3403         }
3404 }
3405
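/* Top-level NIC init: per-queue status blocks first, then the default
 * status block, coalescing, rings, contexts, internal memories and the
 * indirection table; only after all of that are interrupts enabled.
 */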
3406 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3407 {
3408         int i;
3409
3410         for_each_queue(bp, i) {
3411                 struct bnx2x_fastpath *fp = &bp->fp[i];
3412
3413                 fp->bp = bp;
3414                 fp->state = BNX2X_FP_STATE_CLOSED;
3415                 fp->index = i;
3416                 fp->cl_id = BP_L_ID(bp) + i;
3417 #ifdef BCM_CNIC
3418                 fp->sb_id = fp->cl_id + 1;
3419 #else
3420                 fp->sb_id = fp->cl_id;
3421 #endif
3422                 DP(NETIF_MSG_IFUP,
3423                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
3424                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3425                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3426                               fp->sb_id);
3427                 bnx2x_update_fpsb_idx(fp);
3428         }
3429
3430         /* ensure status block indices were read */
3431         rmb();
3432
3434         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3435                           DEF_SB_ID);
3436         bnx2x_update_dsb_idx(bp);
3437         bnx2x_update_coalesce(bp);
3438         bnx2x_init_rx_rings(bp);
3439         bnx2x_init_tx_ring(bp);
3440         bnx2x_init_sp_ring(bp);
3441         bnx2x_init_context(bp);
3442         bnx2x_init_internal(bp, load_code);
3443         bnx2x_init_ind_table(bp);
3444         bnx2x_stats_init(bp);
3445
3446         /* At this point, we are ready for interrupts */
3447         atomic_set(&bp->intr_sem, 0);
3448
3449         /* flush all before enabling interrupts */
3450         mb();
3451         mmiowb();
3452
3453         bnx2x_int_enable(bp);
3454
3455         /* Check for SPIO5 */
3456         bnx2x_attn_int_deasserted0(bp,
3457                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3458                                    AEU_INPUTS_ATTN_BITS_SPIO5);
3459 }
3460
3461 /* end of nic init */
3462
3463 /*
3464  * gzip service functions
3465  */
3466
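/* Allocate the firmware decompression resources: a DMA-coherent output
 * buffer plus a zlib stream and its inflate workspace, unwinding the
 * partial allocations on failure.
 */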
3467 static int bnx2x_gunzip_init(struct bnx2x *bp)
3468 {
3469         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3470                                             &bp->gunzip_mapping, GFP_KERNEL);
3471         if (bp->gunzip_buf == NULL)
3472                 goto gunzip_nomem1;
3473
3474         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3475         if (bp->strm == NULL)
3476                 goto gunzip_nomem2;
3477
3478         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3479                                       GFP_KERNEL);
3480         if (bp->strm->workspace == NULL)
3481                 goto gunzip_nomem3;
3482
3483         return 0;
3484
3485 gunzip_nomem3:
3486         kfree(bp->strm);
3487         bp->strm = NULL;
3488
3489 gunzip_nomem2:
3490         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3491                           bp->gunzip_mapping);
3492         bp->gunzip_buf = NULL;
3493
3494 gunzip_nomem1:
3495         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3496                " decompression\n");
3497         return -ENOMEM;
3498 }
3499
3500 static void bnx2x_gunzip_end(struct bnx2x *bp)
3501 {
3502         kfree(bp->strm->workspace);
3503
3504         kfree(bp->strm);
3505         bp->strm = NULL;
3506
3507         if (bp->gunzip_buf) {
3508                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3509                                   bp->gunzip_mapping);
3510                 bp->gunzip_buf = NULL;
3511         }
3512 }
3513
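/* Decompress a gzip'ed firmware blob into bp->gunzip_buf. The 10-byte
 * gzip header (magic 0x1f 0x8b, deflate method) and the optional
 * NUL-terminated original-file-name field are skipped by hand, so inflate
 * runs in raw mode (negative windowBits).
 */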
3514 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3515 {
3516         int n, rc;
3517
3518         /* check gzip header */
3519         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3520                 BNX2X_ERR("Bad gzip header\n");
3521                 return -EINVAL;
3522         }
3523
3524         n = 10;
3525
3526 #define FNAME                           0x8
3527
3528         if (zbuf[3] & FNAME)
3529                 while ((zbuf[n++] != 0) && (n < len));
3530
3531         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3532         bp->strm->avail_in = len - n;
3533         bp->strm->next_out = bp->gunzip_buf;
3534         bp->strm->avail_out = FW_BUF_SIZE;
3535
3536         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3537         if (rc != Z_OK)
3538                 return rc;
3539
3540         rc = zlib_inflate(bp->strm, Z_FINISH);
3541         if ((rc != Z_OK) && (rc != Z_STREAM_END))
3542                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3543                            bp->strm->msg);
3544
3545         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3546         if (bp->gunzip_outlen & 0x3)
3547                 netdev_err(bp->dev, "Firmware decompression error:"
3548                                     " gunzip_outlen (%d) not aligned\n",
3549                                 bp->gunzip_outlen);
3550         bp->gunzip_outlen >>= 2;
3551
3552         zlib_inflateEnd(bp->strm);
3553
3554         if (rc == Z_STREAM_END)
3555                 return 0;
3556
3557         return rc;
3558 }
3559
3560 /* nic load/unload */
3561
3562 /*
3563  * General service functions
3564  */
3565
3566 /* send a NIG loopback debug packet */
3567 static void bnx2x_lb_pckt(struct bnx2x *bp)
3568 {
3569         u32 wb_write[3];
3570
3571         /* Ethernet source and destination addresses */
3572         wb_write[0] = 0x55555555;
3573         wb_write[1] = 0x55555555;
3574         wb_write[2] = 0x20;             /* SOP */
3575         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3576
3577         /* NON-IP protocol */
3578         wb_write[0] = 0x09000000;
3579         wb_write[1] = 0x55555555;
3580         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
3581         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3582 }
3583
3584 /* Some of the internal memories
3585  * are not directly readable from the driver;
3586  * to test them we send debug packets.
3587  */
3588 static int bnx2x_int_mem_test(struct bnx2x *bp)
3589 {
3590         int factor;
3591         int count, i;
3592         u32 val = 0;
3593
3594         if (CHIP_REV_IS_FPGA(bp))
3595                 factor = 120;
3596         else if (CHIP_REV_IS_EMUL(bp))
3597                 factor = 200;
3598         else
3599                 factor = 1;
3600
3601         DP(NETIF_MSG_HW, "start part1\n");
3602
3603         /* Disable inputs of parser neighbor blocks */
3604         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3605         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3606         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3607         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3608
3609         /*  Write 0 to parser credits for CFC search request */
3610         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3611         /* Write 0 to parser credits for CFC search request */
3612         /* send Ethernet packet */
3613         bnx2x_lb_pckt(bp);
3614
3615         /* TODO: should the NIG statistics be reset here? */
3616         /* Wait until NIG register shows 1 packet of size 0x10 */
3617         count = 1000 * factor;
3618         while (count) {
3619
3620                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3621                 val = *bnx2x_sp(bp, wb_data[0]);
3622                 if (val == 0x10)
3623                         break;
3624
3625                 msleep(10);
3626                 count--;
3627         }
3628         if (val != 0x10) {
3629                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3630                 return -1;
3631         }
3632
3633         /* Wait until PRS register shows 1 packet */
3634         count = 1000 * factor;
3635         while (count) {
3636                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3637                 if (val == 1)
3638                         break;
3639
3640                 msleep(10);
3641                 count--;
3642         }
3643         if (val != 0x1) {
3644                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3645                 return -2;
3646         }
3647
3648         /* Reset and init BRB, PRS */
3649         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3650         msleep(50);
3651         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3652         msleep(50);
3653         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3654         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3655
3656         DP(NETIF_MSG_HW, "part2\n");
3657
3658         /* Disable inputs of parser neighbor blocks */
3659         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3660         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3661         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3662         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3663
3664         /* Write 0 to parser credits for CFC search request */
3665         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3666
3667         /* send 10 Ethernet packets */
3668         for (i = 0; i < 10; i++)
3669                 bnx2x_lb_pckt(bp);
3670
3671         /* Wait until NIG register shows 10 + 1
3672            packets of size 11*0x10 = 0xb0 */
3673         count = 1000 * factor;
3674         while (count) {
3675
3676                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3677                 val = *bnx2x_sp(bp, wb_data[0]);
3678                 if (val == 0xb0)
3679                         break;
3680
3681                 msleep(10);
3682                 count--;
3683         }
3684         if (val != 0xb0) {
3685                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3686                 return -3;
3687         }
3688
3689         /* Wait until PRS register shows 2 packets */
3690         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3691         if (val != 2)
3692                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3693
3694         /* Write 1 to parser credits for CFC search request */
3695         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3696
3697         /* Wait until PRS register shows 3 packets */
3698         msleep(10 * factor);
3699         /* and check that the PRS packet counter has reached 3 */
3700         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3701         if (val != 3)
3702                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3703
3704         /* clear NIG EOP FIFO */
3705         for (i = 0; i < 11; i++)
3706                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3707         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3708         if (val != 1) {
3709                 BNX2X_ERR("clear of NIG failed\n");
3710                 return -4;
3711         }
3712
3713         /* Reset and init BRB, PRS, NIG */
3714         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3715         msleep(50);
3716         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3717         msleep(50);
3718         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3719         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3720 #ifndef BCM_CNIC
3721         /* set NIC mode */
3722         REG_WR(bp, PRS_REG_NIC_MODE, 1);
3723 #endif
3724
3725         /* Enable inputs of parser neighbor blocks */
3726         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3727         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3728         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3729         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3730
3731         DP(NETIF_MSG_HW, "done\n");
3732
3733         return 0; /* OK */
3734 }
3735
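/* Unmask attention interrupts in most HW blocks (writing 0 to a mask
 * register enables all of its bits). A few SEM/MISC masks are deliberately
 * left untouched (see the commented-out writes), and PBF keeps bits 3-4
 * masked.
 */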
3736 static void enable_blocks_attention(struct bnx2x *bp)
3737 {
3738         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3739         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3740         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3741         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3742         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3743         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3744         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3745         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3746         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3747 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3748 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3749         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3750         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3751         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3752 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3753 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3754         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3755         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3756         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3757         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3758 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3759 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3760         if (CHIP_REV_IS_FPGA(bp))
3761                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3762         else
3763                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3764         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3765         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3766         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3767 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3768 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3769         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3770         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3771 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3772         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
3773 }
3774
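/* Per-block parity attention masks, written verbatim by
 * enable_blocks_parity(). A zero mask apparently unmasks every parity
 * source in the block, while non-zero values (see the inline bit comments)
 * leave known-problematic bits, or whole blocks, masked.
 */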
3775 static const struct {
3776         u32 addr;
3777         u32 mask;
3778 } bnx2x_parity_mask[] = {
3779         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3780         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3781         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3782         {HC_REG_HC_PRTY_MASK, 0xffffffff},
3783         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3784         {QM_REG_QM_PRTY_MASK, 0x0},
3785         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3786         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3787         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3788         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3789         {CDU_REG_CDU_PRTY_MASK, 0x0},
3790         {CFC_REG_CFC_PRTY_MASK, 0x0},
3791         {DBG_REG_DBG_PRTY_MASK, 0x0},
3792         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3793         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3794         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3795         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3796         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3797         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3798         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3799         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3800         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3801         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3802         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3803         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3804         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3805         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3806         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3807 };
3808
3809 static void enable_blocks_parity(struct bnx2x *bp)
3810 {
3811         int i;
3812         int mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
3813
3814         for (i = 0; i < mask_arr_len; i++)
3815                 REG_WR(bp, bnx2x_parity_mask[i].addr,
3816                         bnx2x_parity_mask[i].mask);
3817 }
3818
3819
3820 static void bnx2x_reset_common(struct bnx2x *bp)
3821 {
3822         /* reset_common */
3823         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3824                0xd3ffff7f);
3825         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3826 }
3827
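/* Derive the PXP arbiter orders from the PCIe Device Control register:
 * write order from Max_Payload_Size and, unless overridden by bp->mrrs,
 * read order from Max_Read_Request_Size.
 */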
3828 static void bnx2x_init_pxp(struct bnx2x *bp)
3829 {
3830         u16 devctl;
3831         int r_order, w_order;
3832
3833         pci_read_config_word(bp->pdev,
3834                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3835         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3836         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3837         if (bp->mrrs == -1)
3838                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3839         else {
3840                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3841                 r_order = bp->mrrs;
3842         }
3843
3844         bnx2x_init_pxp_arb(bp, r_order, w_order);
3845 }
3846
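/* Decide from the shared HW configuration whether fan failure monitoring
 * is required (enabled explicitly, or implied by the external PHY type on
 * one of the ports) and, if so, arm SPIO5 as an active-low input interrupt
 * towards the IGU.
 */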
3847 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3848 {
3849         int is_required;
3850         u32 val;
3851         int port;
3852
3853         if (BP_NOMCP(bp))
3854                 return;
3855
3856         is_required = 0;
3857         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3858               SHARED_HW_CFG_FAN_FAILURE_MASK;
3859
3860         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3861                 is_required = 1;
3862
3863         /*
3864          * The fan failure mechanism is usually related to the PHY type since
3865          * the power consumption of the board is affected by the PHY. Currently,
3866          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3867          */
3868         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3869                 for (port = PORT_0; port < PORT_MAX; port++) {
3870                         u32 phy_type =
3871                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
3872                                          external_phy_config) &
3873                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3874                         is_required |=
3875                                 ((phy_type ==
3876                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3877                                  (phy_type ==
3878                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3879                                  (phy_type ==
3880                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3881                 }
3882
3883         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3884
3885         if (is_required == 0)
3886                 return;
3887
3888         /* Fan failure is indicated by SPIO 5 */
3889         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3890                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
3891
3892         /* set to active low mode */
3893         val = REG_RD(bp, MISC_REG_SPIO_INT);
3894         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3895                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3896         REG_WR(bp, MISC_REG_SPIO_INT, val);
3897
3898         /* enable interrupt to signal the IGU */
3899         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3900         val |= (1 << MISC_REGISTERS_SPIO_5);
3901         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3902 }
3903
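/* One-time, chip-wide initialization: reset the common blocks, bring up
 * PXP/DMAE, run every block's COMMON_STAGE init, poll the CFC init-done
 * registers, optionally self-test the internal memories (E1, first load
 * since power-up) and finally run the common PHY init when bootcode is
 * present.
 */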
3904 static int bnx2x_init_common(struct bnx2x *bp)
3905 {
3906         u32 val, i;
3907 #ifdef BCM_CNIC
3908         u32 wb_write[2];
3909 #endif
3910
3911         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
3912
3913         bnx2x_reset_common(bp);
3914         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3915         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3916
3917         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3918         if (CHIP_IS_E1H(bp))
3919                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3920
3921         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3922         msleep(30);
3923         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3924
3925         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3926         if (CHIP_IS_E1(bp)) {
3927                 /* enable HW interrupt from PXP on USDM overflow
3928                    (bit 16 on INT_MASK_0) */
3929                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3930         }
3931
3932         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3933         bnx2x_init_pxp(bp);
3934
3935 #ifdef __BIG_ENDIAN
3936         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3937         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3938         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3939         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3940         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3941         /* make sure this value is 0 */
3942         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3943
3944 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3945         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3946         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3947         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3948         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3949 #endif
3950
3951         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3952 #ifdef BCM_CNIC
3953         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3954         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3955         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3956 #endif
3957
3958         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3959                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3960
3961         /* let the HW do its magic ... */
3962         msleep(100);
3963         /* finish PXP init */
3964         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3965         if (val != 1) {
3966                 BNX2X_ERR("PXP2 CFG failed\n");
3967                 return -EBUSY;
3968         }
3969         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3970         if (val != 1) {
3971                 BNX2X_ERR("PXP2 RD_INIT failed\n");
3972                 return -EBUSY;
3973         }
3974
3975         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3976         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3977
3978         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3979
3980         /* clean the DMAE memory */
3981         bp->dmae_ready = 1;
3982         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3983
3984         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3985         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3986         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3987         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3988
3989         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3990         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3991         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3992         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3993
3994         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3995
3996 #ifdef BCM_CNIC
3997         wb_write[0] = 0;
3998         wb_write[1] = 0;
3999         for (i = 0; i < 64; i++) {
4000                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
4001                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
4002
4003                 if (CHIP_IS_E1H(bp)) {
4004                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4005                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4006                                           wb_write, 2);
4007                 }
4008         }
4009 #endif
4010         /* soft reset pulse */
4011         REG_WR(bp, QM_REG_SOFT_RESET, 1);
4012         REG_WR(bp, QM_REG_SOFT_RESET, 0);
4013
4014 #ifdef BCM_CNIC
4015         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4016 #endif
4017
4018         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4019         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4020         if (!CHIP_REV_IS_SLOW(bp)) {
4021                 /* enable hw interrupt from doorbell Q */
4022                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4023         }
4024
4025         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4026         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4027         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4028 #ifndef BCM_CNIC
4029         /* set NIC mode */
4030         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4031 #endif
4032         if (CHIP_IS_E1H(bp))
4033                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4034
4035         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4036         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4037         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4038         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4039
4040         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4041         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4042         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4043         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4044
4045         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4046         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4047         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4048         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4049
4050         /* sync semi rtc */
4051         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4052                0x80000000);
4053         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4054                0x80000000);
4055
4056         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4057         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4058         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4059
4060         REG_WR(bp, SRC_REG_SOFT_RST, 1);
4061         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4062                 REG_WR(bp, i, random32());
4063         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4064 #ifdef BCM_CNIC
4065         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4066         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4067         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4068         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4069         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4070         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4071         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4072         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4073         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4074         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4075 #endif
4076         REG_WR(bp, SRC_REG_SOFT_RST, 0);
4077
4078         if (sizeof(union cdu_context) != 1024)
4079                 /* we currently assume that a context is 1024 bytes */
4080                 dev_alert(&bp->pdev->dev, "please adjust the size "
4081                                           "of cdu_context(%ld)\n",
4082                          (long)sizeof(union cdu_context));
4083
4084         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4085         val = (4 << 24) + (0 << 12) + 1024;
4086         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4087
4088         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4089         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4090         /* enable context validation interrupt from CFC */
4091         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4092
4093         /* set the thresholds to prevent CFC/CDU race */
4094         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4095
4096         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4097         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4098
4099         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4100         /* Reset PCIE errors for debug */
4101         REG_WR(bp, 0x2814, 0xffffffff);
4102         REG_WR(bp, 0x3820, 0xffffffff);
4103
4104         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4105         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4106         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4107         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4108
4109         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4110         if (CHIP_IS_E1H(bp)) {
4111                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4112                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4113         }
4114
4115         if (CHIP_REV_IS_SLOW(bp))
4116                 msleep(200);
4117
4118         /* finish CFC init */
4119         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4120         if (val != 1) {
4121                 BNX2X_ERR("CFC LL_INIT failed\n");
4122                 return -EBUSY;
4123         }
4124         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4125         if (val != 1) {
4126                 BNX2X_ERR("CFC AC_INIT failed\n");
4127                 return -EBUSY;
4128         }
4129         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4130         if (val != 1) {
4131                 BNX2X_ERR("CFC CAM_INIT failed\n");
4132                 return -EBUSY;
4133         }
4134         REG_WR(bp, CFC_REG_DEBUG0, 0);
4135
4136         /* read the NIG statistic
4137            to see if this is the first load since power-up */
4138         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4139         val = *bnx2x_sp(bp, wb_data[0]);
4140
4141         /* do internal memory self test */
4142         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4143                 BNX2X_ERR("internal mem self test failed\n");
4144                 return -EBUSY;
4145         }
4146
4147         switch (bp->link_params.phy[EXT_PHY1].type) {
4148         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4149         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4150         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4151         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4152                 bp->port.need_hw_lock = 1;
4153                 break;
4154
4155         default:
4156                 break;
4157         }
4158
4159         bnx2x_setup_fan_failure_detection(bp);
4160
4161         /* clear PXP2 attentions */
4162         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4163
4164         enable_blocks_attention(bp);
4165         if (CHIP_PARITY_SUPPORTED(bp))
4166                 enable_blocks_parity(bp);
4167
4168         if (!BP_NOMCP(bp)) {
4169                 bnx2x_acquire_phy_lock(bp);
4170                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4171                 bnx2x_release_phy_lock(bp);
4172         } else
4173                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4174
4175         return 0;
4176 }
4177
4178 static int bnx2x_init_port(struct bnx2x *bp)
4179 {
4180         int port = BP_PORT(bp);
4181         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4182         u32 low, high;
4183         u32 val;
4184
4185         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
4186
4187         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4188
4189         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4190         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4191
4192         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4193         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4194         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4195         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4196
4197 #ifdef BCM_CNIC
4198         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4199
4200         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4201         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4202         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4203 #endif
4204
4205         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4206
4207         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4208         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4209                 /* no pause for emulation and FPGA */
4210                 low = 0;
4211                 high = 513;
4212         } else {
4213                 if (IS_E1HMF(bp))
4214                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4215                 else if (bp->dev->mtu > 4096) {
4216                         if (bp->flags & ONE_PORT_FLAG)
4217                                 low = 160;
4218                         else {
4219                                 val = bp->dev->mtu;
4220                                 /* (24*1024 + val*4)/256 */
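                                /* e.g. MTU 9000: low = 96 + 140 + 1 = 237 */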
4221                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4222                         }
4223                 } else
4224                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4225                 high = low + 56;        /* 14*1024/256 */
4226         }
4227         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4228         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4229
4230
4231         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4232
4233         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4234         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4235         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4236         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4237
4238         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4239         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4240         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4241         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4242
4243         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4244         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4245
4246         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4247
4248         /* configure PBF to work without PAUSE mtu 9000 */
4249         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4250
4251         /* update threshold */
4252         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4253         /* update init credit */
4254         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4255
4256         /* probe changes */
4257         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4258         msleep(5);
4259         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4260
4261 #ifdef BCM_CNIC
4262         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4263 #endif
4264         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4265         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4266
4267         if (CHIP_IS_E1(bp)) {
4268                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4269                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4270         }
4271         bnx2x_init_block(bp, HC_BLOCK, init_stage);
4272
4273         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4274         /* init aeu_mask_attn_func_0/1:
4275          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4276          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4277          *             bits 4-7 are used for "per vn group attention" */
4278         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4279                (IS_E1HMF(bp) ? 0xF7 : 0x7));
4280
4281         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4282         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4283         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4284         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4285         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4286
4287         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4288
4289         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4290
4291         if (CHIP_IS_E1H(bp)) {
4292                 /* 0x2 disable e1hov, 0x1 enable */
4293                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4294                        (IS_E1HMF(bp) ? 0x1 : 0x2));
4295
4297                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4298                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4299                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4301         }
4302
4303         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4304         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4305
4306         switch (bp->link_params.phy[EXT_PHY1].type) {
4307         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4308                 {
4309                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4310
4311                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4312                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4313
4314                 /* The GPIO should be swapped if the swap register is
4315                    set and active */
4316                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4317                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4318
4319                 /* Select function upon port-swap configuration */
4320                 if (port == 0) {
4321                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4322                         aeu_gpio_mask = (swap_val && swap_override) ?
4323                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4324                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4325                 } else {
4326                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4327                         aeu_gpio_mask = (swap_val && swap_override) ?
4328                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4329                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4330                 }
4331                 val = REG_RD(bp, offset);
4332                 /* add GPIO3 to group */
4333                 val |= aeu_gpio_mask;
4334                 REG_WR(bp, offset, val);
4335                 }
4336                 bp->port.need_hw_lock = 1;
4337                 break;
4338
4339         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4340                 bp->port.need_hw_lock = 1;
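                /* fall through */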
4341         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4342                 /* add SPIO 5 to group 0 */
4343                 {
4344                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4345                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4346                 val = REG_RD(bp, reg_addr);
4347                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4348                 REG_WR(bp, reg_addr, val);
4349                 }
4350                 break;
4351         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4352         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4353                 bp->port.need_hw_lock = 1;
4354                 break;
4355         default:
4356                 break;
4357         }
4358         bnx2x__link_reset(bp);
4359
4360         return 0;
4361 }
4362
4363 #define ILT_PER_FUNC            (768/2)
4364 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
4365 /* the phys address is shifted right 12 bits and a 1=valid bit is
4366    added as the 53rd bit;
4367    then, since this is a wide register(TM),
4368    we split it into two 32 bit writes
4369  */
4370 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4371 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
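/* e.g. for addr = 0x000123456789A000:
 *   ONCHIP_ADDR1(addr) = 0x3456789A (low 32 bits of the page number)
 *   ONCHIP_ADDR2(addr) = 0x100012   (valid bit 20 | page number >> 32)
 */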
4372 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
4373 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
4374
4375 #ifdef BCM_CNIC
4376 #define CNIC_ILT_LINES          127
4377 #define CNIC_CTX_PER_ILT        16
4378 #else
4379 #define CNIC_ILT_LINES          0
4380 #endif
4381
4382 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4383 {
4384         int reg;
4385
4386         if (CHIP_IS_E1H(bp))
4387                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4388         else /* E1 */
4389                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4390
4391         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4392 }
4393
4394 static int bnx2x_init_func(struct bnx2x *bp)
4395 {
4396         int port = BP_PORT(bp);
4397         int func = BP_FUNC(bp);
4398         u32 addr, val;
4399         int i;
4400
4401         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4402
4403         /* set MSI reconfigure capability */
4404         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4405         val = REG_RD(bp, addr);
4406         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4407         REG_WR(bp, addr, val);
4408
4409         i = FUNC_ILT_BASE(func);
4410
4411         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4412         if (CHIP_IS_E1H(bp)) {
4413                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4414                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4415         } else /* E1 */
4416                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4417                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4418
4419 #ifdef BCM_CNIC
4420         i += 1 + CNIC_ILT_LINES;
4421         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4422         if (CHIP_IS_E1(bp))
4423                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4424         else {
4425                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4426                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4427         }
4428
4429         i++;
4430         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4431         if (CHIP_IS_E1(bp))
4432                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4433         else {
4434                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4435                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4436         }
4437
4438         i++;
4439         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4440         if (CHIP_IS_E1(bp))
4441                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4442         else {
4443                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4444                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4445         }
4446
4447         /* tell the searcher where the T2 table is */
4448         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4449
4450         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4451                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4452
4453         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4454                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4455                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4456
4457         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4458 #endif
4459
4460         if (CHIP_IS_E1H(bp)) {
4461                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4462                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4463                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4464                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4465                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4466                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4467                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4468                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4469                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4470
4471                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4472                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4473         }
4474
4475         /* HC init per function */
4476         if (CHIP_IS_E1H(bp)) {
4477                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4478
4479                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4480                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4481         }
4482         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4483
4484         /* Reset PCIE errors for debug */
4485         REG_WR(bp, 0x2114, 0xffffffff);
4486         REG_WR(bp, 0x2120, 0xffffffff);
4487         bnx2x_phy_probe(&bp->link_params);
4488         return 0;
4489 }
4490
4491 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4492 {
4493         int i, rc = 0;
4494
4495         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
4496            BP_FUNC(bp), load_code);
4497
4498         bp->dmae_ready = 0;
4499         mutex_init(&bp->dmae_mutex);
4500         rc = bnx2x_gunzip_init(bp);
4501         if (rc)
4502                 return rc;
4503
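        /* The load codes cascade deliberately: COMMON init falls through
         * to PORT init, which falls through to FUNCTION init */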
4504         switch (load_code) {
4505         case FW_MSG_CODE_DRV_LOAD_COMMON:
4506                 rc = bnx2x_init_common(bp);
4507                 if (rc)
4508                         goto init_hw_err;
4509                 /* no break */
4510
4511         case FW_MSG_CODE_DRV_LOAD_PORT:
4512                 bp->dmae_ready = 1;
4513                 rc = bnx2x_init_port(bp);
4514                 if (rc)
4515                         goto init_hw_err;
4516                 /* no break */
4517
4518         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4519                 bp->dmae_ready = 1;
4520                 rc = bnx2x_init_func(bp);
4521                 if (rc)
4522                         goto init_hw_err;
4523                 break;
4524
4525         default:
4526                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4527                 break;
4528         }
4529
4530         if (!BP_NOMCP(bp)) {
4531                 int func = BP_FUNC(bp);
4532
4533                 bp->fw_drv_pulse_wr_seq =
4534                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4535                                  DRV_PULSE_SEQ_MASK);
4536                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4537         }
4538
4539         /* this needs to be done before gunzip end */
4540         bnx2x_zero_def_sb(bp);
4541         for_each_queue(bp, i)
4542                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4543 #ifdef BCM_CNIC
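        /* 'i' is left at bp->num_queues by the loop above, so this zeroes
         * the status block right after the last ethernet queue's one */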
4544         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4545 #endif
4546
4547 init_hw_err:
4548         bnx2x_gunzip_end(bp);
4549
4550         return rc;
4551 }
4552
4553 void bnx2x_free_mem(struct bnx2x *bp)
4554 {
4555
4556 #define BNX2X_PCI_FREE(x, y, size) \
4557         do { \
4558                 if (x) { \
4559                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
4560                         x = NULL; \
4561                         y = 0; \
4562                 } \
4563         } while (0)
4564
4565 #define BNX2X_FREE(x) \
4566         do { \
4567                 if (x) { \
4568                         vfree(x); \
4569                         x = NULL; \
4570                 } \
4571         } while (0)
4572
4573         int i;
4574
4575         /* fastpath */
4576         /* Common */
4577         for_each_queue(bp, i) {
4578
4579                 /* status blocks */
4580                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4581                                bnx2x_fp(bp, i, status_blk_mapping),
4582                                sizeof(struct host_status_block));
4583         }
4584         /* Rx */
4585         for_each_queue(bp, i) {
4586
4587                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4588                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4589                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4590                                bnx2x_fp(bp, i, rx_desc_mapping),
4591                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4592
4593                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4594                                bnx2x_fp(bp, i, rx_comp_mapping),
4595                                sizeof(struct eth_fast_path_rx_cqe) *
4596                                NUM_RCQ_BD);
4597
4598                 /* SGE ring */
4599                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4600                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4601                                bnx2x_fp(bp, i, rx_sge_mapping),
4602                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4603         }
4604         /* Tx */
4605         for_each_queue(bp, i) {
4606
4607                 /* fastpath tx rings: tx_buf tx_desc */
4608                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4609                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4610                                bnx2x_fp(bp, i, tx_desc_mapping),
4611                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4612         }
4613         /* end of fastpath */
4614
4615         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4616                        sizeof(struct host_def_status_block));
4617
4618         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4619                        sizeof(struct bnx2x_slowpath));
4620
4621 #ifdef BCM_CNIC
4622         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4623         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4624         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4625         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4626         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4627                        sizeof(struct host_status_block));
4628 #endif
4629         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4630
4631 #undef BNX2X_PCI_FREE
4632 #undef BNX2X_FREE
4633 }
4634
4635 int bnx2x_alloc_mem(struct bnx2x *bp)
4636 {
4637
4638 #define BNX2X_PCI_ALLOC(x, y, size) \
4639         do { \
4640                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4641                 if (x == NULL) \
4642                         goto alloc_mem_err; \
4643                 memset(x, 0, size); \
4644         } while (0)
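/* note: dma_alloc_coherent() is not guaranteed to return zeroed memory
 * on every platform, hence the explicit memset here and in BNX2X_ALLOC */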
4645
4646 #define BNX2X_ALLOC(x, size) \
4647         do { \
4648                 x = vmalloc(size); \
4649                 if (x == NULL) \
4650                         goto alloc_mem_err; \
4651                 memset(x, 0, size); \
4652         } while (0)
4653
4654         int i;
4655
4656         /* fastpath */
4657         /* Common */
4658         for_each_queue(bp, i) {
4659                 bnx2x_fp(bp, i, bp) = bp;
4660
4661                 /* status blocks */
4662                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4663                                 &bnx2x_fp(bp, i, status_blk_mapping),
4664                                 sizeof(struct host_status_block));
4665         }
4666         /* Rx */
4667         for_each_queue(bp, i) {
4668
4669                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4670                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4671                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4672                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4673                                 &bnx2x_fp(bp, i, rx_desc_mapping),
4674                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4675
4676                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4677                                 &bnx2x_fp(bp, i, rx_comp_mapping),
4678                                 sizeof(struct eth_fast_path_rx_cqe) *
4679                                 NUM_RCQ_BD);
4680
4681                 /* SGE ring */
4682                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4683                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4684                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4685                                 &bnx2x_fp(bp, i, rx_sge_mapping),
4686                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4687         }
4688         /* Tx */
4689         for_each_queue(bp, i) {
4690
4691                 /* fastpath tx rings: tx_buf tx_desc */
4692                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4693                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4694                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4695                                 &bnx2x_fp(bp, i, tx_desc_mapping),
4696                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4697         }
4698         /* end of fastpath */
4699
4700         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4701                         sizeof(struct host_def_status_block));
4702
4703         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4704                         sizeof(struct bnx2x_slowpath));
4705
4706 #ifdef BCM_CNIC
4707         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4708
4709         /* allocate searcher T2 table:
4710            we allocate 1/4 of the T1 size for T2
4711            (which is not entered into the ILT) */
4712         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4713
4714         /* Initialize T2 (for 1024 connections) */
4715         for (i = 0; i < 16*1024; i += 64)
4716                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
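        /* the last 8 bytes of each 64-byte T2 entry hold the physical
         * address of the next entry, forming the searcher's free list */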
4717
4718         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4719         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4720
4721         /* QM queues (128*MAX_CONN) */
4722         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4723
4724         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4725                         sizeof(struct host_status_block));
4726 #endif
4727
4728         /* Slow path ring */
4729         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4730
4731         return 0;
4732
4733 alloc_mem_err:
4734         bnx2x_free_mem(bp);
4735         return -ENOMEM;
4736
4737 #undef BNX2X_PCI_ALLOC
4738 #undef BNX2X_ALLOC
4739 }
4740
4741
4742 /*
4743  * Init service functions
4744  */
4745
4746 /**
4747  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4748  *
4749  * @param bp driver descriptor
4750  * @param set set or clear an entry (1 or 0)
4751  * @param mac pointer to a buffer containing a MAC
4752  * @param cl_bit_vec bit vector of clients to register a MAC for
4753  * @param cam_offset offset in a CAM to use
4754  * @param with_bcast set broadcast MAC as well
4755  */
4756 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4757                                       u32 cl_bit_vec, u8 cam_offset,
4758                                       u8 with_bcast)
4759 {
4760         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4761         int port = BP_PORT(bp);
4762
4763         /* CAM allocation
4764          * unicasts 0-31:port0 32-63:port1
4765          * multicast 64-127:port0 128-191:port1
4766          */
4767         config->hdr.length = 1 + (with_bcast ? 1 : 0);
4768         config->hdr.offset = cam_offset;
4769         config->hdr.client_id = 0xff;
4770         config->hdr.reserved1 = 0;
4771
4772         /* primary MAC */
4773         config->config_table[0].cam_entry.msb_mac_addr =
4774                                         swab16(*(u16 *)&mac[0]);
4775         config->config_table[0].cam_entry.middle_mac_addr =
4776                                         swab16(*(u16 *)&mac[2]);
4777         config->config_table[0].cam_entry.lsb_mac_addr =
4778                                         swab16(*(u16 *)&mac[4]);
4779         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4780         if (set)
4781                 config->config_table[0].target_table_entry.flags = 0;
4782         else
4783                 CAM_INVALIDATE(config->config_table[0]);
4784         config->config_table[0].target_table_entry.clients_bit_vector =
4785                                                 cpu_to_le32(cl_bit_vec);
4786         config->config_table[0].target_table_entry.vlan_id = 0;
4787
4788         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4789            (set ? "setting" : "clearing"),
4790            config->config_table[0].cam_entry.msb_mac_addr,
4791            config->config_table[0].cam_entry.middle_mac_addr,
4792            config->config_table[0].cam_entry.lsb_mac_addr);
4793
4794         /* broadcast */
4795         if (with_bcast) {
4796                 config->config_table[1].cam_entry.msb_mac_addr =
4797                         cpu_to_le16(0xffff);
4798                 config->config_table[1].cam_entry.middle_mac_addr =
4799                         cpu_to_le16(0xffff);
4800                 config->config_table[1].cam_entry.lsb_mac_addr =
4801                         cpu_to_le16(0xffff);
4802                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4803                 if (set)
4804                         config->config_table[1].target_table_entry.flags =
4805                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4806                 else
4807                         CAM_INVALIDATE(config->config_table[1]);
4808                 config->config_table[1].target_table_entry.clients_bit_vector =
4809                                                         cpu_to_le32(cl_bit_vec);
4810                 config->config_table[1].target_table_entry.vlan_id = 0;
4811         }
4812
4813         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4814                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4815                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4816 }
4817
4818 /**
4819  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4820  *
4821  * @param bp driver descriptor
4822  * @param set set or clear an entry (1 or 0)
4823  * @param mac pointer to a buffer containing a MAC
4824  * @param cl_bit_vec bit vector of clients to register a MAC for
4825  * @param cam_offset offset in a CAM to use
4826  */
4827 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4828                                        u32 cl_bit_vec, u8 cam_offset)
4829 {
4830         struct mac_configuration_cmd_e1h *config =
4831                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4832
4833         config->hdr.length = 1;
4834         config->hdr.offset = cam_offset;
4835         config->hdr.client_id = 0xff;
4836         config->hdr.reserved1 = 0;
4837
4838         /* primary MAC */
4839         config->config_table[0].msb_mac_addr =
4840                                         swab16(*(u16 *)&mac[0]);
4841         config->config_table[0].middle_mac_addr =
4842                                         swab16(*(u16 *)&mac[2]);
4843         config->config_table[0].lsb_mac_addr =
4844                                         swab16(*(u16 *)&mac[4]);
4845         config->config_table[0].clients_bit_vector =
4846                                         cpu_to_le32(cl_bit_vec);
4847         config->config_table[0].vlan_id = 0;
4848         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4849         if (set)
4850                 config->config_table[0].flags = BP_PORT(bp);
4851         else
4852                 config->config_table[0].flags =
4853                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4854
4855         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4856            (set ? "setting" : "clearing"),
4857            config->config_table[0].msb_mac_addr,
4858            config->config_table[0].middle_mac_addr,
4859            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4860
4861         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4862                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4863                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4864 }
4865
4866 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4867                              int *state_p, int poll)
4868 {
4869         /* can take a while if any port is running */
4870         int cnt = 5000;
4871
4872         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4873            poll ? "polling" : "waiting", state, idx);
4874
4875         might_sleep();
4876         while (cnt--) {
4877                 if (poll) {
4878                         bnx2x_rx_int(bp->fp, 10);
4879                         /* if the index is different from 0,
4880                          * the reply for some commands will
4881                          * arrive on the non-default queue
4882                          */
4883                         if (idx)
4884                                 bnx2x_rx_int(&bp->fp[idx], 10);
4885                 }
4886
4887                 mb(); /* state is changed by bnx2x_sp_event() */
4888                 if (*state_p == state) {
4889 #ifdef BNX2X_STOP_ON_ERROR
4890                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4891 #endif
4892                         return 0;
4893                 }
4894
4895                 msleep(1);
4896
4897                 if (bp->panic)
4898                         return -EIO;
4899         }
4900
4901         /* timeout! */
4902         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4903                   poll ? "polling" : "waiting", state, idx);
4904 #ifdef BNX2X_STOP_ON_ERROR
4905         bnx2x_panic();
4906 #endif
4907
4908         return -EBUSY;
4909 }
4910
4911 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4912 {
4913         bp->set_mac_pending++;
4914         smp_wmb();
4915
4916         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4917                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
4918
4919         /* Wait for a completion */
4920         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4921 }
4922
4923 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4924 {
4925         bp->set_mac_pending++;
4926         smp_wmb();
4927
4928         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4929                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4930                                   1);
4931
4932         /* Wait for a completion */
4933         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4934 }
4935
4936 #ifdef BCM_CNIC
4937 /**
4938  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4939  * MAC(s). This function will wait until the ramrod completion
4940  * returns.
4941  *
4942  * @param bp driver handle
4943  * @param set set or clear the CAM entry
4944  *
4945  * @return 0 if success, -ENODEV if ramrod doesn't return.
4946  */
4947 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4948 {
4949         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4950
4951         bp->set_mac_pending++;
4952         smp_wmb();
4953
4954         /* Send a SET_MAC ramrod */
4955         if (CHIP_IS_E1(bp))
4956                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4957                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4958                                   1);
4959         else
4960                 /* CAM allocation for E1H
4961                 * unicasts: by func number
4962                 * multicast: 20+FUNC*20, 20 each
4963                 */
4964                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4965                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4966
4967         /* Wait for a completion when setting */
4968         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4969
4970         return 0;
4971 }
4972 #endif
4973
4974 int bnx2x_setup_leading(struct bnx2x *bp)
4975 {
4976         int rc;
4977
4978         /* reset IGU state */
4979         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4980
4981         /* SETUP ramrod */
4982         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4983
4984         /* Wait for completion */
4985         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4986
4987         return rc;
4988 }
4989
4990 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4991 {
4992         struct bnx2x_fastpath *fp = &bp->fp[index];
4993
4994         /* reset IGU state */
4995         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4996
4997         /* SETUP ramrod */
4998         fp->state = BNX2X_FP_STATE_OPENING;
4999         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
5000                       fp->cl_id, 0);
5001
5002         /* Wait for completion */
5003         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
5004                                  &(fp->state), 0);
5005 }
5006
5007
5008 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5009 {
5010
5011         switch (bp->multi_mode) {
5012         case ETH_RSS_MODE_DISABLED:
5013                 bp->num_queues = 1;
5014                 break;
5015
5016         case ETH_RSS_MODE_REGULAR:
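                /* e.g. with num_queues left at 0 on an 8-CPU host this
                 * selects min(8, BNX2X_MAX_QUEUES(bp)) queues */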
5017                 if (num_queues)
5018                         bp->num_queues = min_t(u32, num_queues,
5019                                                   BNX2X_MAX_QUEUES(bp));
5020                 else
5021                         bp->num_queues = min_t(u32, num_online_cpus(),
5022                                                   BNX2X_MAX_QUEUES(bp));
5023                 break;
5024
5026         default:
5027                 bp->num_queues = 1;
5028                 break;
5029         }
5030 }
5031
5032
5033
5034 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5035 {
5036         struct bnx2x_fastpath *fp = &bp->fp[index];
5037         int rc;
5038
5039         /* halt the connection */
5040         fp->state = BNX2X_FP_STATE_HALTING;
5041         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5042
5043         /* Wait for completion */
5044         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5045                                &(fp->state), 1);
5046         if (rc) /* timeout */
5047                 return rc;
5048
5049         /* delete cfc entry */
5050         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5051
5052         /* Wait for completion */
5053         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5054                                &(fp->state), 1);
5055         return rc;
5056 }
5057
5058 static int bnx2x_stop_leading(struct bnx2x *bp)
5059 {
5060         __le16 dsb_sp_prod_idx;
5061         /* if the other port is handling traffic,
5062            this can take a lot of time */
5063         int cnt = 500;
5064         int rc;
5065
5066         might_sleep();
5067
5068         /* Send HALT ramrod */
5069         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5070         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5071
5072         /* Wait for completion */
5073         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5074                                &(bp->fp[0].state), 1);
5075         if (rc) /* timeout */
5076                 return rc;
5077
5078         dsb_sp_prod_idx = *bp->dsb_sp_prod;
5079
5080         /* Send PORT_DELETE ramrod */
5081         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5082
5083         /* Wait for completion to arrive on the default status block;
5084            we are going to reset the chip anyway,
5085            so there is not much to do if this times out
5086          */
5087         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5088                 if (!cnt) {
5089                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5090                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5091                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
5092 #ifdef BNX2X_STOP_ON_ERROR
5093                         bnx2x_panic();
5094 #endif
5095                         rc = -EBUSY;
5096                         break;
5097                 }
5098                 cnt--;
5099                 msleep(1);
5100                 rmb(); /* Refresh the dsb_sp_prod */
5101         }
5102         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5103         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5104
5105         return rc;
5106 }
5107
5108 static void bnx2x_reset_func(struct bnx2x *bp)
5109 {
5110         int port = BP_PORT(bp);
5111         int func = BP_FUNC(bp);
5112         int base, i;
5113
5114         /* Configure IGU */
5115         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5116         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5117
5118 #ifdef BCM_CNIC
5119         /* Disable Timer scan */
5120         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5121         /*
5122          * Wait for at least 10ms and up to 2 seconds for the timers scan to
5123          * complete
5124          */
5125         for (i = 0; i < 200; i++) {
5126                 msleep(10);
5127                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5128                         break;
5129         }
5130 #endif
5131         /* Clear ILT */
5132         base = FUNC_ILT_BASE(func);
5133         for (i = base; i < base + ILT_PER_FUNC; i++)
5134                 bnx2x_ilt_wr(bp, i, 0);
5135 }
5136
5137 static void bnx2x_reset_port(struct bnx2x *bp)
5138 {
5139         int port = BP_PORT(bp);
5140         u32 val;
5141
5142         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5143
5144         /* Do not rcv packets to BRB */
5145         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5146         /* Do not direct rcv packets that are not for MCP to the BRB */
5147         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5148                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5149
5150         /* Configure AEU */
5151         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5152
5153         msleep(100);
5154         /* Check for BRB port occupancy */
5155         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5156         if (val)
5157                 DP(NETIF_MSG_IFDOWN,
5158                    "BRB1 is not empty  %d blocks are occupied\n", val);
5159
5160         /* TODO: Close Doorbell port? */
5161 }
5162
5163 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5164 {
5165         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5166            BP_FUNC(bp), reset_code);
5167
5168         switch (reset_code) {
5169         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5170                 bnx2x_reset_port(bp);
5171                 bnx2x_reset_func(bp);
5172                 bnx2x_reset_common(bp);
5173                 break;
5174
5175         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5176                 bnx2x_reset_port(bp);
5177                 bnx2x_reset_func(bp);
5178                 break;
5179
5180         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5181                 bnx2x_reset_func(bp);
5182                 break;
5183
5184         default:
5185                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5186                 break;
5187         }
5188 }
5189
5190 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5191 {
5192         int port = BP_PORT(bp);
5193         u32 reset_code = 0;
5194         int i, cnt, rc;
5195
5196         /* Wait until tx fastpath tasks complete */
5197         for_each_queue(bp, i) {
5198                 struct bnx2x_fastpath *fp = &bp->fp[i];
5199
5200                 cnt = 1000;
5201                 while (bnx2x_has_tx_work_unload(fp)) {
5202
5203                         bnx2x_tx_int(fp);
5204                         if (!cnt) {
5205                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
5206                                           i);
5207 #ifdef BNX2X_STOP_ON_ERROR
5208                                 bnx2x_panic();
5209                                 return;
5210 #else
5211                                 break;
5212 #endif
5213                         }
5214                         cnt--;
5215                         msleep(1);
5216                 }
5217         }
5218         /* Give HW time to discard old tx messages */
5219         msleep(1);
5220
5221         if (CHIP_IS_E1(bp)) {
5222                 struct mac_configuration_cmd *config =
5223                                                 bnx2x_sp(bp, mcast_config);
5224
5225                 bnx2x_set_eth_mac_addr_e1(bp, 0);
5226
5227                 for (i = 0; i < config->hdr.length; i++)
5228                         CAM_INVALIDATE(config->config_table[i]);
5229
5230                 config->hdr.length = i;
5231                 if (CHIP_REV_IS_SLOW(bp))
5232                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5233                 else
5234                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5235                 config->hdr.client_id = bp->fp->cl_id;
5236                 config->hdr.reserved1 = 0;
5237
5238                 bp->set_mac_pending++;
5239                 smp_wmb();
5240
5241                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5242                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5243                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5244
5245         } else { /* E1H */
5246                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5247
5248                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5249
5250                 for (i = 0; i < MC_HASH_SIZE; i++)
5251                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5252
5253                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5254         }
5255 #ifdef BCM_CNIC
5256         /* Clear iSCSI L2 MAC */
5257         mutex_lock(&bp->cnic_mutex);
5258         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5259                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5260                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5261         }
5262         mutex_unlock(&bp->cnic_mutex);
5263 #endif
5264
5265         if (unload_mode == UNLOAD_NORMAL)
5266                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5267
5268         else if (bp->flags & NO_WOL_FLAG)
5269                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5270
5271         else if (bp->wol) {
5272                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5273                 u8 *mac_addr = bp->dev->dev_addr;
5274                 u32 val;
5275                 /* The mac address is written to entries 1-4 to
5276                    preserve entry 0 which is used by the PMF */
5277                 u8 entry = (BP_E1HVN(bp) + 1)*8;
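                /* each MAC_MATCH CAM entry is two 32-bit registers
                 * (8 bytes), written high word first below */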
5278
5279                 val = (mac_addr[0] << 8) | mac_addr[1];
5280                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5281
5282                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5283                       (mac_addr[4] << 8) | mac_addr[5];
5284                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5285
5286                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5287
5288         } else
5289                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5290
5291         /* Close multi and leading connections;
5292            completions for ramrods are collected in a synchronous way */
5293         for_each_nondefault_queue(bp, i)
5294                 if (bnx2x_stop_multi(bp, i))
5295                         goto unload_error;
5296
5297         rc = bnx2x_stop_leading(bp);
5298         if (rc) {
5299                 BNX2X_ERR("Stop leading failed!\n");
5300 #ifdef BNX2X_STOP_ON_ERROR
5301                 return;
5302 #else
5303                 goto unload_error;
5304 #endif
5305         }
5306
5307 unload_error:
5308         if (!BP_NOMCP(bp))
5309                 reset_code = bnx2x_fw_command(bp, reset_code);
5310         else {
5311                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5312                    load_count[0], load_count[1], load_count[2]);
5313                 load_count[0]--;
5314                 load_count[1 + port]--;
5315                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5316                    load_count[0], load_count[1], load_count[2]);
5317                 if (load_count[0] == 0)
5318                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5319                 else if (load_count[1 + port] == 0)
5320                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5321                 else
5322                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5323         }
5324
5325         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5326             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5327                 bnx2x__link_reset(bp);
5328
5329         /* Reset the chip */
5330         bnx2x_reset_chip(bp, reset_code);
5331
5332         /* Report UNLOAD_DONE to MCP */
5333         if (!BP_NOMCP(bp))
5334                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5335
5336 }
5337
5338 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5339 {
5340         u32 val;
5341
5342         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5343
5344         if (CHIP_IS_E1(bp)) {
5345                 int port = BP_PORT(bp);
5346                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5347                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
5348
5349                 val = REG_RD(bp, addr);
5350                 val &= ~(0x300);
5351                 REG_WR(bp, addr, val);
5352         } else if (CHIP_IS_E1H(bp)) {
5353                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5354                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5355                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5356                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5357         }
5358 }
5359
5360
5361 /* Close gates #2, #3 and #4: */
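/* (#2 discards host internal writes in the PXP, #4 discards host
 *  doorbells, #3 gates the HC interrupt path - note its inverted
 *  polarity; mapping inferred from the registers used below) */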
5362 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5363 {
5364         u32 val, addr;
5365
5366         /* Gates #2 and #4a are closed/opened for "not E1" only */
5367         if (!CHIP_IS_E1(bp)) {
5368                 /* #4 */
5369                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5370                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5371                        close ? (val | 0x1) : (val & (~(u32)1)));
5372                 /* #2 */
5373                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5374                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5375                        close ? (val | 0x1) : (val & (~(u32)1)));
5376         }
5377
5378         /* #3 */
5379         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5380         val = REG_RD(bp, addr);
5381         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5382
5383         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5384                 close ? "closing" : "opening");
5385         mmiowb();
5386 }
5387
5388 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
5389
5390 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5391 {
5392         /* Do some magic... */
5393         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5394         *magic_val = val & SHARED_MF_CLP_MAGIC;
5395         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5396 }
5397
5398 /* Restore the value of the `magic' bit.
5399  *
5400  * @param bp driver handle.
5401  * @param magic_val Old value of the `magic' bit.
5402  */
5403 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5404 {
5405         /* Restore the `magic' bit value... */
5409         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5410         MF_CFG_WR(bp, shared_mf_config.clp_mb,
5411                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5412 }
5413
5414 /* Prepares for MCP reset: takes care of CLP configurations.
5415  *
5416  * @param bp
5417  * @param magic_val Old value of 'magic' bit.
5418  */
5419 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5420 {
5421         u32 shmem;
5422         u32 validity_offset;
5423
5424         DP(NETIF_MSG_HW, "Starting\n");
5425
5426         /* Set `magic' bit in order to save MF config */
5427         if (!CHIP_IS_E1(bp))
5428                 bnx2x_clp_reset_prep(bp, magic_val);
5429
5430         /* Get shmem offset */
5431         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5432         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5433
5434         /* Clear validity map flags */
5435         if (shmem > 0)
5436                 REG_WR(bp, shmem + validity_offset, 0);
5437 }
5438
5439 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5440 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
5441
5442 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5443  * depending on the HW type.
5444  *
5445  * @param bp
5446  */
5447 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5448 {
5449         /* special handling for emulation and FPGA,
5450            wait 10 times longer */
5451         if (CHIP_REV_IS_SLOW(bp))
5452                 msleep(MCP_ONE_TIMEOUT*10);
5453         else
5454                 msleep(MCP_ONE_TIMEOUT);
5455 }
5456
5457 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5458 {
5459         u32 shmem, cnt, validity_offset, val;
5460         int rc = 0;
5461
5462         msleep(100);
5463
5464         /* Get shmem offset */
5465         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5466         if (shmem == 0) {
5467                 BNX2X_ERR("Shmem 0 return failure\n");
5468                 rc = -ENOTTY;
5469                 goto exit_lbl;
5470         }
5471
5472         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5473
5474         /* Wait for MCP to come up */
5475         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5476                 /* TBD: it's best to check the validity map of the last
5477                  * port; currently this checks port 0.
5478                  */
5479                 val = REG_RD(bp, shmem + validity_offset);
5480                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5481                    shmem + validity_offset, val);
5482
5483                 /* check that shared memory is valid. */
5484                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5485                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5486                         break;
5487
5488                 bnx2x_mcp_wait_one(bp);
5489         }
5490
5491         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5492
5493         /* Check that shared memory is valid. This indicates that MCP is up. */
5494         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5495             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5496                 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5497                 rc = -ENOTTY;
5498                 goto exit_lbl;
5499         }
5500
5501 exit_lbl:
5502         /* Restore the `magic' bit value */
5503         if (!CHIP_IS_E1(bp))
5504                 bnx2x_clp_reset_done(bp, magic_val);
5505
5506         return rc;
5507 }
5508
5509 static void bnx2x_pxp_prep(struct bnx2x *bp)
5510 {
5511         if (!CHIP_IS_E1(bp)) {
5512                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5513                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5514                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5515                 mmiowb();
5516         }
5517 }
5518
5519 /*
5520  * Reset the whole chip except for:
5521  *      - PCIE core
5522  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5523  *              one reset bit)
5524  *      - IGU
5525  *      - MISC (including AEU)
5526  *      - GRC
5527  *      - RBCN, RBCP
5528  */
5529 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5530 {
5531         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5532
5533         not_reset_mask1 =
5534                 MISC_REGISTERS_RESET_REG_1_RST_HC |
5535                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5536                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5537
5538         not_reset_mask2 =
5539                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5540                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5541                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5542                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5543                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5544                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5545                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5546                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5547
5548         reset_mask1 = 0xffffffff;
5549
5550         if (CHIP_IS_E1(bp))
5551                 reset_mask2 = 0xffff;
5552         else
5553                 reset_mask2 = 0x1ffff;
5554
5555         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5556                reset_mask1 & (~not_reset_mask1));
5557         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5558                reset_mask2 & (~not_reset_mask2));
5559
5560         barrier();
5561         mmiowb();
5562
5563         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5564         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5565         mmiowb();
5566 }
5567
5568 static int bnx2x_process_kill(struct bnx2x *bp)
5569 {
5570         int cnt = 1000;
5571         u32 val = 0;
5572         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5573
5574
5575         /* Empty the Tetris buffer, wait for 1s */
5576         do {
5577                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5578                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5579                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5580                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5581                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5582                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5583                     ((port_is_idle_0 & 0x1) == 0x1) &&
5584                     ((port_is_idle_1 & 0x1) == 0x1) &&
5585                     (pgl_exp_rom2 == 0xffffffff))
5586                         break;
5587                 msleep(1);
5588         } while (cnt-- > 0);
5589
5590         if (cnt <= 0) {
5591                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5592                           " are still"
5593                           " outstanding read requests after 1s!\n");
5594                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5595                           " port_is_idle_0=0x%08x,"
5596                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5597                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5598                           pgl_exp_rom2);
5599                 return -EAGAIN;
5600         }
5601
5602         barrier();
5603
5604         /* Close gates #2, #3 and #4 */
5605         bnx2x_set_234_gates(bp, true);
5606
5607         /* TBD: Indicate that "process kill" is in progress to MCP */
5608
5609         /* Clear "unprepared" bit */
5610         REG_WR(bp, MISC_REG_UNPREPARED, 0);
5611         barrier();
5612
5613         /* Make sure all is written to the chip before the reset */
5614         mmiowb();
5615
5616         /* Wait for 1ms to empty GLUE and PCI-E core queues,
5617          * PSWHST, GRC and PSWRD Tetris buffer.
5618          */
5619         msleep(1);
5620
        /* Prepare for chip reset: */
5622         /* MCP */
5623         bnx2x_reset_mcp_prep(bp, &val);
5624
5625         /* PXP */
5626         bnx2x_pxp_prep(bp);
5627         barrier();
5628
5629         /* reset the chip */
5630         bnx2x_process_kill_chip_reset(bp);
5631         barrier();
5632
5633         /* Recover after reset: */
5634         /* MCP */
5635         if (bnx2x_reset_mcp_comp(bp, val))
5636                 return -EAGAIN;
5637
5638         /* PXP */
5639         bnx2x_pxp_prep(bp);
5640
5641         /* Open the gates #2, #3 and #4 */
5642         bnx2x_set_234_gates(bp, false);
5643
        /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
         * reset state, re-enable attentions. */
5646
5647         return 0;
5648 }
5649
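/* Executed by the recovery leader only: run "process kill" and, on
 * success, clear the global "reset in progress" flag. Leadership and
 * the corresponding HW lock are released on both exit paths.
 */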
5650 static int bnx2x_leader_reset(struct bnx2x *bp)
5651 {
5652         int rc = 0;
5653         /* Try to recover after the failure */
5654         if (bnx2x_process_kill(bp)) {
                printk(KERN_ERR "%s: Something bad happened! Aii!\n",
5656                        bp->dev->name);
5657                 rc = -EAGAIN;
5658                 goto exit_leader_reset;
5659         }
5660
5661         /* Clear "reset is in progress" bit and update the driver state */
5662         bnx2x_set_reset_done(bp);
5663         bp->recovery_state = BNX2X_RECOVERY_DONE;
5664
5665 exit_leader_reset:
5666         bp->is_leader = 0;
5667         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5668         smp_wmb();
5669         return rc;
5670 }
5671
/* Assumption: runs under rtnl lock. This, together with the fact
 * that it's called only from bnx2x_reset_task(), ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
5676 static void bnx2x_parity_recover(struct bnx2x *bp)
5677 {
5678         DP(NETIF_MSG_HW, "Handling parity\n");
5679         while (1) {
5680                 switch (bp->recovery_state) {
5681                 case BNX2X_RECOVERY_INIT:
5682                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5683                         /* Try to get a LEADER_LOCK HW lock */
5684                         if (bnx2x_trylock_hw_lock(bp,
5685                                 HW_LOCK_RESOURCE_RESERVED_08))
5686                                 bp->is_leader = 1;
5687
                        /* Stop the driver */
                        /* If the interface has been removed - return */
5690                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5691                                 return;
5692
5693                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
                        /* Ensure the "is_leader" and "recovery_state"
                         * updates are seen on other CPUs
                         */
5697                         smp_wmb();
5698                         break;
5699
5700                 case BNX2X_RECOVERY_WAIT:
5701                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5702                         if (bp->is_leader) {
5703                                 u32 load_counter = bnx2x_get_load_cnt(bp);
5704                                 if (load_counter) {
                                        /* Wait until all other functions go
                                         * down.
                                         */
5708                                         schedule_delayed_work(&bp->reset_task,
5709                                                                 HZ/10);
5710                                         return;
                                } else {
                                        /* If all other functions went down -
                                         * try to bring the chip back to
                                         * normal. In any case it's an exit
                                         * point for a leader.
                                         */
                                        if (bnx2x_leader_reset(bp) ||
                                            bnx2x_nic_load(bp, LOAD_NORMAL)) {
                                                printk(KERN_ERR "%s: Recovery "
                                                "has failed. Power cycle is "
                                                "needed.\n", bp->dev->name);
5722                                                 /* Disconnect this device */
5723                                                 netif_device_detach(bp->dev);
                                                /* Block ifup for all functions
                                                 * of this ASIC until
                                                 * "process kill" or power
                                                 * cycle.
                                                 */
5729                                                 bnx2x_set_reset_in_progress(bp);
5730                                                 /* Shut down the power */
5731                                                 bnx2x_set_power_state(bp,
5732                                                                 PCI_D3hot);
5733                                                 return;
5734                                         }
5735
5736                                         return;
5737                                 }
5738                         } else { /* non-leader */
5739                                 if (!bnx2x_reset_is_done(bp)) {
                                        /* Try to get a LEADER_LOCK HW lock,
                                         * since a former leader may have been
                                         * unloaded by the user or released
                                         * leadership for some other reason.
                                         */
5746                                         if (bnx2x_trylock_hw_lock(bp,
5747                                             HW_LOCK_RESOURCE_RESERVED_08)) {
5748                                                 /* I'm a leader now! Restart a
5749                                                  * switch case.
5750                                                  */
5751                                                 bp->is_leader = 1;
5752                                                 break;
5753                                         }
5754
5755                                         schedule_delayed_work(&bp->reset_task,
5756                                                                 HZ/10);
5757                                         return;
5758
5759                                 } else { /* A leader has completed
5760                                           * the "process kill". It's an exit
5761                                           * point for a non-leader.
5762                                           */
5763                                         bnx2x_nic_load(bp, LOAD_NORMAL);
5764                                         bp->recovery_state =
5765                                                 BNX2X_RECOVERY_DONE;
5766                                         smp_wmb();
5767                                         return;
5768                                 }
5769                         }
5770                 default:
5771                         return;
5772                 }
5773         }
5774 }
5775
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
5779 static void bnx2x_reset_task(struct work_struct *work)
5780 {
5781         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5782
5783 #ifdef BNX2X_STOP_ON_ERROR
        BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
                  " so reset not done to allow debug dump,\n"
                  " you will need to reboot when done\n");
5787         return;
5788 #endif
5789
5790         rtnl_lock();
5791
5792         if (!netif_running(bp->dev))
5793                 goto reset_task_exit;
5794
5795         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5796                 bnx2x_parity_recover(bp);
5797         else {
5798                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5799                 bnx2x_nic_load(bp, LOAD_NORMAL);
5800         }
5801
5802 reset_task_exit:
5803         rtnl_unlock();
5804 }
5805
5806 /* end of nic load/unload */
5807
5808 /*
5809  * Init service functions
5810  */
5811
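/* Map a function index to its PXP2 "pretend" register; writing a
 * function number there makes subsequent GRC accesses appear to come
 * from that function. Typical use (see bnx2x_undi_int_disable_e1h()
 * below):
 *
 *	u32 reg = bnx2x_get_pretend_reg(bp, func);
 *	REG_WR(bp, reg, 0);		- pretend to be function 0
 *	...				- access the chip as function 0
 *	REG_WR(bp, reg, func);		- restore the original function
 */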
5812 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5813 {
5814         switch (func) {
5815         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5816         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5817         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5818         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5819         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5820         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5821         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5822         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5823         default:
5824                 BNX2X_ERR("Unsupported function index: %d\n", func);
5825                 return (u32)(-1);
5826         }
5827 }
5828
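/* Disable HC interrupts while pretending to be function 0, since the
 * UNDI workaround runs before per-function setup and has to act in
 * "like-E1" mode; BUG()s if the pretend register doesn't take the new
 * value (the read-back is what flushes the GRC write).
 */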
5829 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5830 {
5831         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5832
5833         /* Flush all outstanding writes */
5834         mmiowb();
5835
5836         /* Pretend to be function 0 */
5837         REG_WR(bp, reg, 0);
5838         /* Flush the GRC transaction (in the chip) */
5839         new_val = REG_RD(bp, reg);
5840         if (new_val != 0) {
5841                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5842                           new_val);
5843                 BUG();
5844         }
5845
        /* From now on we are in the "like-E1" mode */
5847         bnx2x_int_disable(bp);
5848
5849         /* Flush all outstanding writes */
5850         mmiowb();
5851
                        /* Restore the original function settings */
5853         REG_WR(bp, reg, orig_func);
5854         new_val = REG_RD(bp, reg);
5855         if (new_val != orig_func) {
5856                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5857                           orig_func, new_val);
5858                 BUG();
5859         }
5860 }
5861
5862 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5863 {
5864         if (CHIP_IS_E1H(bp))
5865                 bnx2x_undi_int_disable_e1h(bp, func);
5866         else
5867                 bnx2x_int_disable(bp);
5868 }
5869
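/* Work around a pre-boot (UNDI) driver that may have left the device
 * active: it is detected by the normal-doorbell CID offset being 0x7.
 * If found, perform the MCP unload handshake (for both ports when
 * needed), block Rx traffic to the BRB, reset the device and restore
 * the NIG port-swap straps before sending UNLOAD_DONE.
 */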
5870 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5871 {
5872         u32 val;
5873
5874         /* Check if there is any driver already loaded */
5875         val = REG_RD(bp, MISC_REG_UNPREPARED);
5876         if (val == 0x1) {
                /* Check if it is the UNDI driver:
                 * the UNDI driver initializes the CID offset for the normal
                 * doorbell to 0x7
                 */
5880                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5881                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5882                 if (val == 0x7) {
5883                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5884                         /* save our func */
5885                         int func = BP_FUNC(bp);
5886                         u32 swap_en;
5887                         u32 swap_val;
5888
5889                         /* clear the UNDI indication */
5890                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5891
                        BNX2X_DEV_INFO("UNDI is active! resetting device\n");
5893
                        /* try to unload UNDI on port 0 */
5895                         bp->func = 0;
5896                         bp->fw_seq =
5897                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5898                                 DRV_MSG_SEQ_NUMBER_MASK);
5899                         reset_code = bnx2x_fw_command(bp, reset_code);
5900
5901                         /* if UNDI is loaded on the other port */
5902                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5903
5904                                 /* send "DONE" for previous unload */
5905                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5906
5907                                 /* unload UNDI on port 1 */
5908                                 bp->func = 1;
5909                                 bp->fw_seq =
5910                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5911                                         DRV_MSG_SEQ_NUMBER_MASK);
5912                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5913
5914                                 bnx2x_fw_command(bp, reset_code);
5915                         }
5916
5917                         /* now it's safe to release the lock */
5918                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5919
5920                         bnx2x_undi_int_disable(bp, func);
5921
5922                         /* close input traffic and wait for it */
5923                         /* Do not rcv packets to BRB */
5924                         REG_WR(bp,
5925                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5926                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5927                         /* Do not direct rcv packets that are not for MCP to
5928                          * the BRB */
5929                         REG_WR(bp,
5930                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5931                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5932                         /* clear AEU */
5933                         REG_WR(bp,
5934                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5935                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5936                         msleep(10);
5937
5938                         /* save NIG port swap info */
5939                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5940                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5941                         /* reset device */
5942                         REG_WR(bp,
5943                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5944                                0xd3ffffff);
5945                         REG_WR(bp,
5946                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5947                                0x1403);
5948                         /* take the NIG out of reset and restore swap values */
5949                         REG_WR(bp,
5950                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5951                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
5952                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5953                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5954
5955                         /* send unload done to the MCP */
5956                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5957
5958                         /* restore our func and fw_seq */
5959                         bp->func = func;
5960                         bp->fw_seq =
5961                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5962                                 DRV_MSG_SEQ_NUMBER_MASK);
5963
5964                 } else
5965                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5966         }
5967 }
5968
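/* Read the chip-wide configuration: chip ID, flash size, shmem bases,
 * bootcode version, part number and WoL capability. Sets NO_MCP_FLAG
 * when the shmem base looks invalid (MCP not running).
 */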
5969 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5970 {
5971         u32 val, val2, val3, val4, id;
5972         u16 pmc;
5973
5974         /* Get the chip revision id and number. */
5975         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5976         val = REG_RD(bp, MISC_REG_CHIP_NUM);
5977         id = ((val & 0xffff) << 16);
5978         val = REG_RD(bp, MISC_REG_CHIP_REV);
5979         id |= ((val & 0xf) << 12);
5980         val = REG_RD(bp, MISC_REG_CHIP_METAL);
5981         id |= ((val & 0xff) << 4);
5982         val = REG_RD(bp, MISC_REG_BOND_ID);
5983         id |= (val & 0xf);
5984         bp->common.chip_id = id;
5985         bp->link_params.chip_id = bp->common.chip_id;
5986         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5987
5988         val = (REG_RD(bp, 0x2874) & 0x55);
5989         if ((bp->common.chip_id & 0x1) ||
5990             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5991                 bp->flags |= ONE_PORT_FLAG;
5992                 BNX2X_DEV_INFO("single port device\n");
5993         }
5994
5995         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5996         bp->common.flash_size = (NVRAM_1MB_SIZE <<
5997                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
5998         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5999                        bp->common.flash_size, bp->common.flash_size);
6000
6001         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6002         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
6003         bp->link_params.shmem_base = bp->common.shmem_base;
6004         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
6005                        bp->common.shmem_base, bp->common.shmem2_base);
6006
6007         if (!bp->common.shmem_base ||
6008             (bp->common.shmem_base < 0xA0000) ||
6009             (bp->common.shmem_base >= 0xC0000)) {
6010                 BNX2X_DEV_INFO("MCP not active\n");
6011                 bp->flags |= NO_MCP_FLAG;
6012                 return;
6013         }
6014
6015         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6016         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6017                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6018                 BNX2X_ERROR("BAD MCP validity signature\n");
6019
6020         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6021         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6022
6023         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6024                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6025                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6026
6027         bp->link_params.feature_config_flags = 0;
6028         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6029         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6030                 bp->link_params.feature_config_flags |=
6031                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6032         else
6033                 bp->link_params.feature_config_flags &=
6034                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6035
6036         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6037         bp->common.bc_ver = val;
6038         BNX2X_DEV_INFO("bc_ver %X\n", val);
6039         if (val < BNX2X_BC_VER) {
                /* for now only warn;
                 * later we might need to enforce this */
6042                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6043                             "please upgrade BC\n", BNX2X_BC_VER, val);
6044         }
6045         bp->link_params.feature_config_flags |=
6046                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6047                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6048
6049         if (BP_E1HVN(bp) == 0) {
6050                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6051                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6052         } else {
6053                 /* no WOL capability for E1HVN != 0 */
6054                 bp->flags |= NO_WOL_FLAG;
6055         }
6056         BNX2X_DEV_INFO("%sWoL capable\n",
6057                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
6058
6059         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6060         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6061         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6062         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6063
6064         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6065                  val, val2, val3, val4);
6066 }
6067
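/* Build the port's "supported" link-mode mask from the active PHY and
 * trim it according to the NVRAM speed capability mask.
 */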
6068 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6069                                                     u32 switch_cfg)
6070 {
6071         int port = BP_PORT(bp);
6072         bp->port.supported = 0;
        switch (bp->link_params.num_phys) {
        case 1:
                bp->port.supported = bp->link_params.phy[INT_PHY].supported;
                break;
        case 2:
                bp->port.supported = bp->link_params.phy[EXT_PHY1].supported;
                break;
        }
6081
        if (!bp->port.supported) {
                BNX2X_ERR("NVRAM config error. BAD phy config."
                          " PHY1 config 0x%x\n",
                          SHMEM_RD(bp,
                          dev_info.port_hw_config[port].external_phy_config));
                return;
        }
6089
6090         switch (switch_cfg) {
6091         case SWITCH_CFG_1G:
6092                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6093                                            port*0x10);
6094                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6095                 break;
6096
6097         case SWITCH_CFG_10G:
6098                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6099                                            port*0x18);
6100                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6101
6102                 break;
6103
6104         default:
6105                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6106                           bp->port.link_config);
6107                 return;
6108         }
6109         /* mask what we support according to speed_cap_mask */
6110         if (!(bp->link_params.speed_cap_mask &
6111                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6112                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6113
6114         if (!(bp->link_params.speed_cap_mask &
6115                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6116                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6117
6118         if (!(bp->link_params.speed_cap_mask &
6119                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6120                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6121
6122         if (!(bp->link_params.speed_cap_mask &
6123                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6124                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6125
6126         if (!(bp->link_params.speed_cap_mask &
6127                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6128                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6129                                         SUPPORTED_1000baseT_Full);
6130
6131         if (!(bp->link_params.speed_cap_mask &
6132                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6133                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6134
6135         if (!(bp->link_params.speed_cap_mask &
6136                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6137                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6138
6139         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
6140 }
6141
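/* Translate the NVRAM link_config into the requested line speed,
 * duplex, flow control and advertised modes, falling back to
 * autonegotiation on an invalid configuration.
 */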
6142 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6143 {
6144         bp->link_params.req_duplex = DUPLEX_FULL;
6145
6146         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6147         case PORT_FEATURE_LINK_SPEED_AUTO:
6148                 if (bp->port.supported & SUPPORTED_Autoneg) {
6149                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6150                         bp->port.advertising = bp->port.supported;
                } else {
                        /* force 10G, no AN */
                        bp->link_params.req_line_speed = SPEED_10000;
                        bp->port.advertising = (ADVERTISED_10000baseT_Full |
                                                ADVERTISED_FIBRE);
                }
6157                 break;
6158
6159         case PORT_FEATURE_LINK_SPEED_10M_FULL:
6160                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6161                         bp->link_params.req_line_speed = SPEED_10;
6162                         bp->port.advertising = (ADVERTISED_10baseT_Full |
6163                                                 ADVERTISED_TP);
6164                 } else {
6165                         BNX2X_ERROR("NVRAM config error. "
6166                                     "Invalid link_config 0x%x"
6167                                     "  speed_cap_mask 0x%x\n",
6168                                     bp->port.link_config,
6169                                     bp->link_params.speed_cap_mask);
6170                         return;
6171                 }
6172                 break;
6173
6174         case PORT_FEATURE_LINK_SPEED_10M_HALF:
6175                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6176                         bp->link_params.req_line_speed = SPEED_10;
6177                         bp->link_params.req_duplex = DUPLEX_HALF;
6178                         bp->port.advertising = (ADVERTISED_10baseT_Half |
6179                                                 ADVERTISED_TP);
6180                 } else {
6181                         BNX2X_ERROR("NVRAM config error. "
6182                                     "Invalid link_config 0x%x"
6183                                     "  speed_cap_mask 0x%x\n",
6184                                     bp->port.link_config,
6185                                     bp->link_params.speed_cap_mask);
6186                         return;
6187                 }
6188                 break;
6189
6190         case PORT_FEATURE_LINK_SPEED_100M_FULL:
6191                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6192                         bp->link_params.req_line_speed = SPEED_100;
6193                         bp->port.advertising = (ADVERTISED_100baseT_Full |
6194                                                 ADVERTISED_TP);
6195                 } else {
6196                         BNX2X_ERROR("NVRAM config error. "
6197                                     "Invalid link_config 0x%x"
6198                                     "  speed_cap_mask 0x%x\n",
6199                                     bp->port.link_config,
6200                                     bp->link_params.speed_cap_mask);
6201                         return;
6202                 }
6203                 break;
6204
6205         case PORT_FEATURE_LINK_SPEED_100M_HALF:
6206                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6207                         bp->link_params.req_line_speed = SPEED_100;
6208                         bp->link_params.req_duplex = DUPLEX_HALF;
6209                         bp->port.advertising = (ADVERTISED_100baseT_Half |
6210                                                 ADVERTISED_TP);
6211                 } else {
6212                         BNX2X_ERROR("NVRAM config error. "
6213                                     "Invalid link_config 0x%x"
6214                                     "  speed_cap_mask 0x%x\n",
6215                                     bp->port.link_config,
6216                                     bp->link_params.speed_cap_mask);
6217                         return;
6218                 }
6219                 break;
6220
6221         case PORT_FEATURE_LINK_SPEED_1G:
6222                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6223                         bp->link_params.req_line_speed = SPEED_1000;
6224                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
6225                                                 ADVERTISED_TP);
6226                 } else {
6227                         BNX2X_ERROR("NVRAM config error. "
6228                                     "Invalid link_config 0x%x"
6229                                     "  speed_cap_mask 0x%x\n",
6230                                     bp->port.link_config,
6231                                     bp->link_params.speed_cap_mask);
6232                         return;
6233                 }
6234                 break;
6235
6236         case PORT_FEATURE_LINK_SPEED_2_5G:
6237                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6238                         bp->link_params.req_line_speed = SPEED_2500;
6239                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
6240                                                 ADVERTISED_TP);
6241                 } else {
6242                         BNX2X_ERROR("NVRAM config error. "
6243                                     "Invalid link_config 0x%x"
6244                                     "  speed_cap_mask 0x%x\n",
6245                                     bp->port.link_config,
6246                                     bp->link_params.speed_cap_mask);
6247                         return;
6248                 }
6249                 break;
6250
6251         case PORT_FEATURE_LINK_SPEED_10G_CX4:
6252         case PORT_FEATURE_LINK_SPEED_10G_KX4:
6253         case PORT_FEATURE_LINK_SPEED_10G_KR:
6254                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6255                         bp->link_params.req_line_speed = SPEED_10000;
6256                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
6257                                                 ADVERTISED_FIBRE);
6258                 } else {
6259                         BNX2X_ERROR("NVRAM config error. "
6260                                     "Invalid link_config 0x%x"
6261                                     "  speed_cap_mask 0x%x\n",
6262                                     bp->port.link_config,
6263                                     bp->link_params.speed_cap_mask);
6264                         return;
6265                 }
6266                 break;
6267
6268         default:
6269                 BNX2X_ERROR("NVRAM config error. "
6270                             "BAD link speed link_config 0x%x\n",
6271                             bp->port.link_config);
6272                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6273                 bp->port.advertising = bp->port.supported;
6274                 break;
6275         }
6276
6277         bp->link_params.req_flow_ctrl = (bp->port.link_config &
6278                                          PORT_FEATURE_FLOW_CONTROL_MASK);
6279         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6280             !(bp->port.supported & SUPPORTED_Autoneg))
6281                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6282
6283         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
6284                        "  advertising 0x%x\n",
6285                        bp->link_params.req_line_speed,
6286                        bp->link_params.req_duplex,
6287                        bp->link_params.req_flow_ctrl, bp->port.advertising);
6288 }
6289
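/* Compose a MAC address from the two shmem words: the upper 16 bits
 * and the lower 32 bits are converted to big-endian (wire) order, so
 * e.g. mac_hi = 0x0011, mac_lo = 0x22334455 yields 00:11:22:33:44:55.
 */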
6290 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6291 {
6292         mac_hi = cpu_to_be16(mac_hi);
6293         mac_lo = cpu_to_be32(mac_lo);
6294         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6295         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6296 }
6297
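/* Read the per-port configuration from shmem: lane config, speed
 * capability mask, link config, default WoL state, MAC addresses and
 * the MDIO address to expose through the mdio interface.
 */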
6298 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6299 {
6300         int port = BP_PORT(bp);
6301         u32 val, val2;
6302         u32 config;
        u32 ext_phy_type, ext_phy_config;
6304
6305         bp->link_params.bp = bp;
6306         bp->link_params.port = port;
6307
6308         bp->link_params.lane_config =
6309                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6310
6311         bp->link_params.speed_cap_mask =
6312                 SHMEM_RD(bp,
6313                          dev_info.port_hw_config[port].speed_capability_mask);
6314
6315         bp->port.link_config =
                SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

        /* If the device is capable of WoL, set the default state according
6320          * to the HW
6321          */
6322         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6323         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6324                    (config & PORT_FEATURE_WOL_ENABLED));
6325
6326         BNX2X_DEV_INFO("lane_config 0x%08x"
6327                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
6328                        bp->link_params.lane_config,
6329                        bp->link_params.speed_cap_mask, bp->port.link_config);
6330
6331         bp->link_params.switch_cfg |= (bp->port.link_config &
6332                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
6333         bnx2x_phy_probe(&bp->link_params);
6334         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6335
6336         bnx2x_link_settings_requested(bp);
6337
6338         /*
6339          * If connected directly, work with the internal PHY, otherwise, work
6340          * with the external PHY
6341          */
6342         ext_phy_config =
6343                 SHMEM_RD(bp,
6344                          dev_info.port_hw_config[port].external_phy_config);
6345         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6346         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6347                 bp->mdio.prtad = bp->port.phy_addr;
6348
6349         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6350                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6351                 bp->mdio.prtad =
6352                         XGXS_EXT_PHY_ADDR(ext_phy_config);
6353
6354         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6355         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6356         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6357         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6358         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6359
6360 #ifdef BCM_CNIC
6361         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6362         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6363         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6364 #endif
6365 }
6366
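/* Gather common, port and function HW info. In E1H multi-function
 * mode also validate the outer-VLAN (E1HOV) tag and pick up the
 * per-function MAC address from the MF configuration.
 */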
6367 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6368 {
6369         int func = BP_FUNC(bp);
6370         u32 val, val2;
6371         int rc = 0;
6372
6373         bnx2x_get_common_hwinfo(bp);
6374
6375         bp->e1hov = 0;
6376         bp->e1hmf = 0;
6377         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6378                 bp->mf_config =
6379                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6380
6381                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6382                        FUNC_MF_CFG_E1HOV_TAG_MASK);
6383                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6384                         bp->e1hmf = 1;
6385                 BNX2X_DEV_INFO("%s function mode\n",
6386                                IS_E1HMF(bp) ? "multi" : "single");
6387
6388                 if (IS_E1HMF(bp)) {
6389                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6390                                                                 e1hov_tag) &
6391                                FUNC_MF_CFG_E1HOV_TAG_MASK);
6392                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6393                                 bp->e1hov = val;
6394                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6395                                                "(0x%04x)\n",
6396                                                func, bp->e1hov, bp->e1hov);
6397                         } else {
6398                                 BNX2X_ERROR("No valid E1HOV for func %d,"
6399                                             "  aborting\n", func);
6400                                 rc = -EPERM;
6401                         }
6402                 } else {
6403                         if (BP_E1HVN(bp)) {
6404                                 BNX2X_ERROR("VN %d in single function mode,"
6405                                             "  aborting\n", BP_E1HVN(bp));
6406                                 rc = -EPERM;
6407                         }
6408                 }
6409         }
6410
6411         if (!BP_NOMCP(bp)) {
6412                 bnx2x_get_port_hwinfo(bp);
6413
6414                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6415                               DRV_MSG_SEQ_NUMBER_MASK);
6416                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6417         }
6418
6419         if (IS_E1HMF(bp)) {
6420                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6421                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
6422                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6423                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6424                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6425                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6426                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6427                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6428                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
6429                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
6430                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6431                                ETH_ALEN);
6432                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6433                                ETH_ALEN);
6434                 }
6435
6436                 return rc;
6437         }
6438
6439         if (BP_NOMCP(bp)) {
6440                 /* only supposed to happen on emulation/FPGA */
6441                 BNX2X_ERROR("warning: random MAC workaround active\n");
6442                 random_ether_addr(bp->dev->dev_addr);
6443                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6444         }
6445
6446         return rc;
6447 }
6448
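/* Extract the OEM firmware version string from the PCI VPD read-only
 * area into bp->fw_ver (vendor-specific keyword, matched only for
 * Dell-branded boards); leaves bp->fw_ver zeroed when not found.
 */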
6449 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6450 {
6451         int cnt, i, block_end, rodi;
6452         char vpd_data[BNX2X_VPD_LEN+1];
6453         char str_id_reg[VENDOR_ID_LEN+1];
6454         char str_id_cap[VENDOR_ID_LEN+1];
6455         u8 len;
6456
6457         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6458         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6459
6460         if (cnt < BNX2X_VPD_LEN)
6461                 goto out_not_found;
6462
6463         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6464                              PCI_VPD_LRDT_RO_DATA);
6465         if (i < 0)
                goto out_not_found;

        block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6470                     pci_vpd_lrdt_size(&vpd_data[i]);
6471
6472         i += PCI_VPD_LRDT_TAG_SIZE;
6473
6474         if (block_end > BNX2X_VPD_LEN)
6475                 goto out_not_found;
6476
6477         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6478                                    PCI_VPD_RO_KEYWORD_MFR_ID);
6479         if (rodi < 0)
6480                 goto out_not_found;
6481
6482         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6483
6484         if (len != VENDOR_ID_LEN)
6485                 goto out_not_found;
6486
6487         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6488
6489         /* vendor specific info */
6490         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6491         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6492         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6493             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6494
6495                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6496                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
6497                 if (rodi >= 0) {
6498                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6499
6500                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6501
6502                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6503                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6504                                 bp->fw_ver[len] = ' ';
6505                         }
6506                 }
6507                 return;
6508         }
6509 out_not_found:
6510         return;
6511 }
6512
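/* One-time driver-private initialization, called during device probe:
 * locks and work items, HW/FW info, UNDI unload if needed, settings
 * derived from module parameters, and the slow-path timer.
 */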
6513 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6514 {
6515         int func = BP_FUNC(bp);
6516         int timer_interval;
6517         int rc;
6518
6519         /* Disable interrupt handling until HW is initialized */
6520         atomic_set(&bp->intr_sem, 1);
6521         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6522
6523         mutex_init(&bp->port.phy_mutex);
6524         mutex_init(&bp->fw_mb_mutex);
6525         spin_lock_init(&bp->stats_lock);
6526 #ifdef BCM_CNIC
6527         mutex_init(&bp->cnic_mutex);
6528 #endif
6529
6530         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6531         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6532
6533         rc = bnx2x_get_hwinfo(bp);
6534
6535         bnx2x_read_fwinfo(bp);
6536         /* need to reset chip if undi was active */
6537         if (!BP_NOMCP(bp))
6538                 bnx2x_undi_unload(bp);
6539
6540         if (CHIP_REV_IS_FPGA(bp))
6541                 dev_err(&bp->pdev->dev, "FPGA detected\n");
6542
6543         if (BP_NOMCP(bp) && (func == 0))
6544                 dev_err(&bp->pdev->dev, "MCP disabled, "
6545                                         "must load devices in order!\n");
6546
6547         /* Set multi queue mode */
6548         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6549             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6550                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6551                                         "requested is not MSI-X\n");
6552                 multi_mode = ETH_RSS_MODE_DISABLED;
6553         }
6554         bp->multi_mode = multi_mode;
6555         bp->int_mode = int_mode;
6556
6557         bp->dev->features |= NETIF_F_GRO;
6558
6559         /* Set TPA flags */
6560         if (disable_tpa) {
6561                 bp->flags &= ~TPA_ENABLE_FLAG;
6562                 bp->dev->features &= ~NETIF_F_LRO;
6563         } else {
6564                 bp->flags |= TPA_ENABLE_FLAG;
6565                 bp->dev->features |= NETIF_F_LRO;
6566         }
6567         bp->disable_tpa = disable_tpa;
6568
6569         if (CHIP_IS_E1(bp))
6570                 bp->dropless_fc = 0;
6571         else
6572                 bp->dropless_fc = dropless_fc;
6573
6574         bp->mrrs = mrrs;
6575
6576         bp->tx_ring_size = MAX_TX_AVAIL;
6577         bp->rx_ring_size = MAX_RX_AVAIL;
6578
6579         bp->rx_csum = 1;
6580
6581         /* make sure that the numbers are in the right granularity */
6582         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6583         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6584
6585         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6586         bp->current_interval = (poll ? poll : timer_interval);
6587
6588         init_timer(&bp->timer);
6589         bp->timer.expires = jiffies + bp->current_interval;
6590         bp->timer.data = (unsigned long) bp;
6591         bp->timer.function = bnx2x_timer;
6592
6593         return rc;
}

/****************************************************************************
6598 * General service functions
6599 ****************************************************************************/
6600
6601 /* called with rtnl_lock */
6602 static int bnx2x_open(struct net_device *dev)
6603 {
6604         struct bnx2x *bp = netdev_priv(dev);
6605
6606         netif_carrier_off(dev);
6607
6608         bnx2x_set_power_state(bp, PCI_D0);
6609
6610         if (!bnx2x_reset_is_done(bp)) {
6611                 do {
                        /* Reset MCP mailbox sequence if there is an ongoing
                         * recovery
                         */
6615                         bp->fw_seq = 0;
6616
                        /* If it's the first function to load and "reset done"
                         * is still not cleared, it may mean that a previous
                         * recovery didn't complete. We don't check the
                         * attention state here because it may have already
                         * been cleared by a "common" reset, but we shall
                         * proceed with "process kill" anyway.
                         */
6623                         if ((bnx2x_get_load_cnt(bp) == 0) &&
6624                                 bnx2x_trylock_hw_lock(bp,
6625                                 HW_LOCK_RESOURCE_RESERVED_08) &&
6626                                 (!bnx2x_leader_reset(bp))) {
6627                                 DP(NETIF_MSG_HW, "Recovered in open\n");
6628                                 break;
6629                         }
6630
6631                         bnx2x_set_power_state(bp, PCI_D3hot);
6632
                        printk(KERN_ERR "%s: Recovery flow hasn't been"
                               " properly completed yet. Try again later."
                               " If you still see this message after a few"
                               " retries then a power cycle is required.\n",
                               bp->dev->name);
6637
6638                         return -EAGAIN;
6639                 } while (0);
6640         }
6641
6642         bp->recovery_state = BNX2X_RECOVERY_DONE;
6643
6644         return bnx2x_nic_load(bp, LOAD_OPEN);
6645 }
6646
6647 /* called with rtnl_lock */
6648 static int bnx2x_close(struct net_device *dev)
6649 {
6650         struct bnx2x *bp = netdev_priv(dev);
6651
6652         /* Unload the driver, release IRQs */
6653         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6654         bnx2x_set_power_state(bp, PCI_D3hot);
6655
6656         return 0;
6657 }
6658
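/* Program the Rx filtering mode. On E1 the multicast list is written
 * to the CAM via a slow-path ramrod; on E1H a 256-bit hash filter is
 * used instead: the filter bit index is the top byte of crc32c(MAC),
 * i.e. register mc_filter[bit >> 5], bit (bit & 0x1f).
 */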
6659 /* called with netif_tx_lock from dev_mcast.c */
6660 void bnx2x_set_rx_mode(struct net_device *dev)
6661 {
6662         struct bnx2x *bp = netdev_priv(dev);
6663         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6664         int port = BP_PORT(bp);
6665
6666         if (bp->state != BNX2X_STATE_OPEN) {
6667                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6668                 return;
6669         }
6670
6671         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6672
6673         if (dev->flags & IFF_PROMISC)
6674                 rx_mode = BNX2X_RX_MODE_PROMISC;
6675
6676         else if ((dev->flags & IFF_ALLMULTI) ||
6677                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6678                   CHIP_IS_E1(bp)))
6679                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6680
6681         else { /* some multicasts */
6682                 if (CHIP_IS_E1(bp)) {
6683                         int i, old, offset;
6684                         struct netdev_hw_addr *ha;
6685                         struct mac_configuration_cmd *config =
6686                                                 bnx2x_sp(bp, mcast_config);
6687
6688                         i = 0;
6689                         netdev_for_each_mc_addr(ha, dev) {
6690                                 config->config_table[i].
6691                                         cam_entry.msb_mac_addr =
6692                                         swab16(*(u16 *)&ha->addr[0]);
6693                                 config->config_table[i].
6694                                         cam_entry.middle_mac_addr =
6695                                         swab16(*(u16 *)&ha->addr[2]);
6696                                 config->config_table[i].
6697                                         cam_entry.lsb_mac_addr =
6698                                         swab16(*(u16 *)&ha->addr[4]);
6699                                 config->config_table[i].cam_entry.flags =
6700                                                         cpu_to_le16(port);
6701                                 config->config_table[i].
6702                                         target_table_entry.flags = 0;
6703                                 config->config_table[i].target_table_entry.
6704                                         clients_bit_vector =
6705                                                 cpu_to_le32(1 << BP_L_ID(bp));
6706                                 config->config_table[i].
6707                                         target_table_entry.vlan_id = 0;
6708
6709                                 DP(NETIF_MSG_IFUP,
6710                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6711                                    config->config_table[i].
6712                                                 cam_entry.msb_mac_addr,
6713                                    config->config_table[i].
6714                                                 cam_entry.middle_mac_addr,
6715                                    config->config_table[i].
6716                                                 cam_entry.lsb_mac_addr);
6717                                 i++;
6718                         }
6719                         old = config->hdr.length;
6720                         if (old > i) {
6721                                 for (; i < old; i++) {
6722                                         if (CAM_IS_INVALID(config->
6723                                                            config_table[i])) {
6724                                                 /* already invalidated */
6725                                                 break;
6726                                         }
6727                                         /* invalidate */
6728                                         CAM_INVALIDATE(config->
6729                                                        config_table[i]);
6730                                 }
6731                         }
6732
6733                         if (CHIP_REV_IS_SLOW(bp))
6734                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6735                         else
6736                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
6737
6738                         config->hdr.length = i;
6739                         config->hdr.offset = offset;
6740                         config->hdr.client_id = bp->fp->cl_id;
6741                         config->hdr.reserved1 = 0;
6742
6743                         bp->set_mac_pending++;
6744                         smp_wmb();
6745
6746                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6747                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6748                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6749                                       0);
6750                 } else { /* E1H */
6751                         /* Accept one or more multicasts */
6752                         struct netdev_hw_addr *ha;
6753                         u32 mc_filter[MC_HASH_SIZE];
6754                         u32 crc, bit, regidx;
6755                         int i;
6756
6757                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6758
6759                         netdev_for_each_mc_addr(ha, dev) {
6760                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6761                                    ha->addr);
6762
6763                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6764                                 bit = (crc >> 24) & 0xff;
6765                                 regidx = bit >> 5;
6766                                 bit &= 0x1f;
6767                                 mc_filter[regidx] |= (1 << bit);
6768                         }
6769
6770                         for (i = 0; i < MC_HASH_SIZE; i++)
6771                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6772                                        mc_filter[i]);
6773                 }
6774         }
6775
6776         bp->rx_mode = rx_mode;
6777         bnx2x_set_storm_rx_mode(bp);
6778 }
6779
/* called with rtnl_lock */
6782 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6783                            int devad, u16 addr)
6784 {
6785         struct bnx2x *bp = netdev_priv(netdev);
6786         u16 value;
6787         int rc;
6788
6789         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6790            prtad, devad, addr);
6791
6792         /* The HW expects different devad if CL22 is used */
6793         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6794
6795         bnx2x_acquire_phy_lock(bp);
6796         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
6797         bnx2x_release_phy_lock(bp);
6798         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6799
6800         if (!rc)
6801                 rc = value;
6802         return rc;
6803 }
6804
6805 /* called with rtnl_lock */
6806 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6807                             u16 addr, u16 value)
6808 {
6809         struct bnx2x *bp = netdev_priv(netdev);
6810         int rc;
6811
6812         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6813                            " value 0x%x\n", prtad, devad, addr, value);
6814
6815         /* The HW expects different devad if CL22 is used */
6816         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6817
6818         bnx2x_acquire_phy_lock(bp);
6819         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
6820         bnx2x_release_phy_lock(bp);
6821         return rc;
6822 }
6823
6824 /* called with rtnl_lock */
6825 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6826 {
6827         struct bnx2x *bp = netdev_priv(dev);
6828         struct mii_ioctl_data *mdio = if_mii(ifr);
6829
6830         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
6831            mdio->phy_id, mdio->reg_num, mdio->val_in);
6832
6833         if (!netif_running(dev))
6834                 return -EAGAIN;
6835
6836         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
6837 }
6838
6839 #ifdef CONFIG_NET_POLL_CONTROLLER
6840 static void poll_bnx2x(struct net_device *dev)
6841 {
6842         struct bnx2x *bp = netdev_priv(dev);
6843
6844         disable_irq(bp->pdev->irq);
6845         bnx2x_interrupt(bp->pdev->irq, dev);
6846         enable_irq(bp->pdev->irq);
6847 }
6848 #endif
6849
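/* net_device callbacks; the VLAN and netpoll entries are compiled in
 * only when the corresponding build options are enabled.
 */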
6850 static const struct net_device_ops bnx2x_netdev_ops = {
6851         .ndo_open               = bnx2x_open,
6852         .ndo_stop               = bnx2x_close,
6853         .ndo_start_xmit         = bnx2x_start_xmit,
6854         .ndo_set_multicast_list = bnx2x_set_rx_mode,
6855         .ndo_set_mac_address    = bnx2x_change_mac_addr,
6856         .ndo_validate_addr      = eth_validate_addr,
6857         .ndo_do_ioctl           = bnx2x_ioctl,
6858         .ndo_change_mtu         = bnx2x_change_mtu,
6859         .ndo_tx_timeout         = bnx2x_tx_timeout,
6860 #ifdef BCM_VLAN
6861         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
6862 #endif
6863 #ifdef CONFIG_NET_POLL_CONTROLLER
6864         .ndo_poll_controller    = poll_bnx2x,
6865 #endif
6866 };
6867
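/**
 * bnx2x_init_dev - PCI-level initialization of one device
 *
 * Enables the PCI device, maps BAR0 (registers) and BAR2 (doorbells),
 * sets the DMA masks and fills in the netdev callbacks and feature
 * flags.  On failure, everything acquired so far is released again in
 * reverse order.
 */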
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
                                    struct net_device *dev)
{
        struct bnx2x *bp;
        int rc;

        SET_NETDEV_DEV(dev, &pdev->dev);
        bp = netdev_priv(dev);

        bp->dev = dev;
        bp->pdev = pdev;
        bp->flags = 0;
        bp->func = PCI_FUNC(pdev->devfn);

        rc = pci_enable_device(pdev);
        if (rc) {
                dev_err(&bp->pdev->dev,
                        "Cannot enable PCI device, aborting\n");
                goto err_out;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&bp->pdev->dev,
                        "Cannot find PCI device base address, aborting\n");
                rc = -ENODEV;
                goto err_out_disable;
        }

        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                dev_err(&bp->pdev->dev, "Cannot find second PCI device"
                        " base address, aborting\n");
                rc = -ENODEV;
                goto err_out_disable;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, DRV_MODULE_NAME);
                if (rc) {
                        dev_err(&bp->pdev->dev,
                                "Cannot obtain PCI resources, aborting\n");
                        goto err_out_disable;
                }

                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (bp->pm_cap == 0) {
                dev_err(&bp->pdev->dev,
                        "Cannot find power management capability, aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (bp->pcie_cap == 0) {
                dev_err(&bp->pdev->dev,
                        "Cannot find PCI Express capability, aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
                        dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
                                " failed, aborting\n");
                        rc = -EIO;
                        goto err_out_release;
                }

        } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
                dev_err(&bp->pdev->dev,
                        "System does not support DMA, aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        dev->mem_start = pci_resource_start(pdev, 0);
        dev->base_addr = dev->mem_start;
        dev->mem_end = pci_resource_end(pdev, 0);

        dev->irq = pdev->irq;

        bp->regview = pci_ioremap_bar(pdev, 0);
        if (!bp->regview) {
                dev_err(&bp->pdev->dev,
                        "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err_out_release;
        }

        bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
                                        min_t(u64, BNX2X_DB_SIZE,
                                              pci_resource_len(pdev, 2)));
        if (!bp->doorbells) {
                dev_err(&bp->pdev->dev,
                        "Cannot map doorbell space, aborting\n");
                rc = -ENOMEM;
                goto err_out_unmap;
        }

        bnx2x_set_power_state(bp, PCI_D0);

        /* clean indirect addresses */
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
        REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

        /* Reset the load counter */
        bnx2x_clear_load_cnt(bp);

        dev->watchdog_timeo = TX_TIMEOUT;

        dev->netdev_ops = &bnx2x_netdev_ops;
        bnx2x_set_ethtool_ops(dev);
        dev->features |= NETIF_F_SG;
        dev->features |= NETIF_F_HW_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
        dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
        bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

        dev->vlan_features |= NETIF_F_SG;
        dev->vlan_features |= NETIF_F_HW_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->vlan_features |= NETIF_F_HIGHDMA;
        dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->vlan_features |= NETIF_F_TSO6;
#endif

        /* get_port_hwinfo() will set prtad and mmds properly */
        bp->mdio.prtad = MDIO_PRTAD_NONE;
        bp->mdio.mmds = 0;
        bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
        bp->mdio.dev = dev;
        bp->mdio.mdio_read = bnx2x_mdio_read;
        bp->mdio.mdio_write = bnx2x_mdio_write;

        return 0;

err_out_unmap:
        if (bp->regview) {
                iounmap(bp->regview);
                bp->regview = NULL;
        }
        if (bp->doorbells) {
                iounmap(bp->doorbells);
                bp->doorbells = NULL;
        }

err_out_release:
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

err_out_disable:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

err_out:
        return rc;
}

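/*
 * Read back the negotiated PCI-E link width and speed from the link
 * status shadowed in the device's config space window.
 */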
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
                                                 int *width, int *speed)
{
        u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

        *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

        /* speed is encoded as 1=2.5GT/s (Gen1), 2=5GT/s (Gen2) */
        *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

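/*
 * Sanity-check a loaded firmware image: all section offsets/lengths must
 * stay inside the file, all init_ops offsets must be in range, and the
 * version embedded in the image must match the one this driver was
 * built against.
 */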
static int bnx2x_check_firmware(struct bnx2x *bp)
{
        const struct firmware *firmware = bp->firmware;
        struct bnx2x_fw_file_hdr *fw_hdr;
        struct bnx2x_fw_file_section *sections;
        u32 offset, len, num_ops;
        u16 *ops_offsets;
        int i;
        const u8 *fw_ver;

        if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
                return -EINVAL;

        fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
        sections = (struct bnx2x_fw_file_section *)fw_hdr;

        /* Make sure none of the offsets and sizes make us read beyond
         * the end of the firmware data */
        for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
                offset = be32_to_cpu(sections[i].offset);
                len = be32_to_cpu(sections[i].len);
                if (offset + len > firmware->size) {
                        dev_err(&bp->pdev->dev,
                                "Section %d length is out of bounds\n", i);
                        return -EINVAL;
                }
        }

        /* Likewise for the init_ops offsets */
        offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
        ops_offsets = (u16 *)(firmware->data + offset);
        num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

        for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
                if (be16_to_cpu(ops_offsets[i]) > num_ops) {
                        dev_err(&bp->pdev->dev,
                                "Section offset %d is out of bounds\n", i);
                        return -EINVAL;
                }
        }

        /* Check FW version */
        offset = be32_to_cpu(fw_hdr->fw_version.offset);
        fw_ver = firmware->data + offset;
        if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
            (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
            (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
            (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
                dev_err(&bp->pdev->dev,
                        "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
                        fw_ver[0], fw_ver[1], fw_ver[2],
                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
                        BCM_5710_FW_MINOR_VERSION,
                        BCM_5710_FW_REVISION_VERSION,
                        BCM_5710_FW_ENGINEERING_VERSION);
                return -EINVAL;
        }

        return 0;
}

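/*
 * The firmware file stores everything big endian; the helpers below
 * convert the init arrays to host byte order in a single pass.
 */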
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
        const __be32 *source = (const __be32 *)_source;
        u32 *target = (u32 *)_target;
        u32 i;

        for (i = 0; i < n/4; i++)
                target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
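/*
 * For example (illustrative values, not taken from a real blob), the
 * big-endian words 0x01002048 0x00000005 decode to op 0x01, offset
 * 0x002048 and raw_data 0x5.
 */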
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
        const __be32 *source = (const __be32 *)_source;
        struct raw_op *target = (struct raw_op *)_target;
        u32 i, j, tmp;

        for (i = 0, j = 0; i < n/8; i++, j += 2) {
                tmp = be32_to_cpu(source[j]);
                target[i].op = (tmp >> 24) & 0xff;
                target[i].offset = tmp & 0xffffff;
                target[i].raw_data = be32_to_cpu(source[j + 1]);
        }
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
        const __be16 *source = (const __be16 *)_source;
        u16 *target = (u16 *)_target;
        u32 i;

        for (i = 0; i < n/2; i++)
                target[i] = be16_to_cpu(source[i]);
}

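/*
 * Allocate one init array and fill it from the firmware blob, converting
 * byte order with 'func'.  Expects 'fw_hdr' and 'bp' to be in scope and
 * jumps to 'lbl' if the allocation fails.
 */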
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
do {                                                                    \
        u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
        bp->arr = kmalloc(len, GFP_KERNEL);                             \
        if (!bp->arr) {                                                 \
                pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
                goto lbl;                                               \
        }                                                               \
        func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
             (u8 *)bp->arr, len);                                       \
} while (0)

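/*
 * Pick the firmware file matching the chip revision, load and validate
 * it, unpack the init arrays and remember where each STORM's microcode
 * lives inside the blob.
 */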
int bnx2x_init_firmware(struct bnx2x *bp)
{
        const char *fw_file_name;
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc;

        if (CHIP_IS_E1(bp))
                fw_file_name = FW_FILE_NAME_E1;
        else if (CHIP_IS_E1H(bp))
                fw_file_name = FW_FILE_NAME_E1H;
        else {
                BNX2X_ERR("Unsupported chip revision\n");
                return -EINVAL;
        }

        BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

        rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
        if (rc) {
                BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
                goto request_firmware_exit;
        }

        rc = bnx2x_check_firmware(bp);
        if (rc) {
                BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
                goto request_firmware_exit;
        }

        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

        /* Initialize the pointers to the init arrays */
        /* Blob */
        BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

        /* Opcodes */
        BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

        /* Offsets */
        BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
                            be16_to_cpu_n);

        /* STORMs firmware */
        INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
        INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_pram_data.offset);
        INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_int_table_data.offset);
        INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_pram_data.offset);
        INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
        INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_pram_data.offset);
        INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_int_table_data.offset);
        INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_pram_data.offset);

        return 0;

init_offsets_alloc_err:
        kfree(bp->init_ops);
init_ops_alloc_err:
        kfree(bp->init_data);
request_firmware_exit:
        release_firmware(bp->firmware);

        return rc;
}


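/*
 * Device probe: allocate the netdev, perform the PCI-level setup,
 * initialize the driver state and register with the network stack.
 */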
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc;

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                dev_err(&pdev->dev, "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msg_enable = debug;

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
        netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               pcie_width, (pcie_speed == 2) ? "5GT/s (Gen2)" : "2.5GT/s",
               dev->base_addr, bp->pdev->irq);
        pr_cont("node addr %pM\n", dev->dev_addr);

        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        /* Make sure RESET task is not scheduled before continuing */
        cancel_delayed_work_sync(&bp->reset_task);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

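/*
 * Half-way unload used when a PCI error has been detected: quiesce the
 * driver and free its resources, skipping the normal FW/chip shutdown
 * sequence since the device may no longer be reachable.
 */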
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);
        netif_carrier_off(bp->dev);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        return 0;
}

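/*
 * Re-read the shared-memory state after a slot reset so the MCP
 * (management firmware), if present, can be talked to again.
 */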
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                printk(KERN_ERR "Handling parity error recovery. Try again later\n");
                return;
        }

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

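/*
 * Module init/exit.  The single-threaded workqueue created here is used
 * for the driver's slow-path work items.
 */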
static int __init bnx2x_init(void)
{
        int ret;

        pr_info("%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                pr_err("Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                pr_err("Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

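/*
 * CNIC support: the glue between this driver and the cnic module, which
 * implements offloaded protocols (e.g. iSCSI) on top of the same device.
 */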
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        /* move queued kwqes from the kwq ring to the SPQ while there
         * is room for them */
        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}

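/*
 * Accept up to @count kwqes from cnic onto the internal kwq ring and
 * return how many were actually queued (the ring may fill up first).
 */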
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

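/*
 * Two ways of handing a control event to cnic: bnx2x_cnic_ctl_send()
 * may sleep (it takes cnic_mutex), while bnx2x_cnic_ctl_send_bh() is
 * RCU-protected and safe to call from BH context.
 */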
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

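/*
 * Control operations cnic invokes on this driver: context-table writes,
 * completion accounting and starting/stopping its L2 rings.
 */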
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

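/*
 * Tell cnic which interrupt resources to use: irq_arr[0] carries the
 * CNIC status block (and, with MSI-X, the second vector in the table),
 * irq_arr[1] the default status block.
 */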
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

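/*
 * Called by cnic to attach: allocate the kwqe ring, set up the CNIC
 * status block and iSCSI MAC, then publish the ops pointer.
 */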
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

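/*
 * Entry point used by the cnic module to discover this device and the
 * callbacks it exports.
 */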
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */
