/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
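
/*
 * Illustrative note (editor's sketch, not from the original source): the
 * two helpers above tunnel GRC register accesses through the PCI config
 * space window - PCICFG_GRC_ADDRESS selects the target register and
 * PCICFG_GRC_DATA carries the value - so a read such as
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, some_grc_reg);
 *
 * (where "some_grc_reg" is a placeholder for any GRC register address)
 * works without going through the memory-mapped BAR.  The window is
 * restored to PCICFG_VENDOR_ID_OFFSET afterwards so a stray config read
 * stays harmless.
 */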

const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
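
/*
 * Illustrative usage (editor's sketch, not from the original source):
 * copy two u32s that already sit in coherent DMA memory into device
 * internal memory.  dma_addr must come from a DMA mapping such as
 * dma_alloc_coherent(); dst_addr is a GRC byte address (the helper
 * converts it to a dword address with ">> 2"):
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *			 BAR_XSTRORM_INTMEM + some_offset, 2);
 *
 * "some_offset" is a placeholder.  The call blocks (mutex plus completion
 * polling), so it must not be used from atomic context.
 */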

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
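
/*
 * Editor's note on the arithmetic above: "len" counts 32-bit words while
 * "offset" advances in bytes, hence the "dmae_wr_max * 4" step.  E.g. with
 * a (hypothetical) dmae_wr_max of 0x400 words, a 0x500-word write is split
 * into one 0x400-word transfer and a second transfer of the remaining
 * 0x100 words starting 0x1000 bytes further on.
 */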

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
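
/*
 * Illustrative usage (editor's sketch, not from the original source):
 * wide-bus registers are 64 bits wide and are written as a hi/lo dword
 * pair in one DMAE transaction, e.g.
 *
 *	bnx2x_wb_wr(bp, some_wb_reg, U64_HI(mapping), U64_LO(mapping));
 *
 * where "some_wb_reg" stands for any wide-bus register address and
 * "mapping" for a 64-bit DMA address.
 */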

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
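
/*
 * Editor's note: each STORM keeps a list of firmware assert entries in its
 * internal memory; an entry is four consecutive u32 rows, and the list is
 * terminated by a row0 of COMMON_ASM_INVALID_ASSERT_OPCODE, which is why
 * each loop above breaks on the first invalid opcode.  The return value is
 * the total number of asserts found across all four STORMs.
 */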

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}
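
/*
 * Editor's summary of the HC config paths above: the MSI-X path clears
 * SINGLE_ISR and INT_LINE and sets the MSI/MSI-X and attention enables;
 * the MSI path clears only INT_LINE; and the INTx fallback first writes
 * the config with every enable set, then clears the MSI/MSI-X enable for
 * the final write - presumably so the HC is never left in a state with no
 * usable interrupt mode during the transition.
 */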

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}
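
/*
 * Editor's note on the quiesce order above: intr_sem is raised first so
 * any ISR that fires from here on returns early, the HC is then optionally
 * silenced, synchronize_irq() waits for in-flight handlers on every vector
 * (the default vector, an extra CNIC vector when configured, and one per
 * queue under MSI-X), and only then is the slowpath work cancelled and the
 * workqueue flushed.
 */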

/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}
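
/*
 * Illustrative usage (editor's sketch, not from the original source):
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_SPIO)) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	}
 *
 * Unlike bnx2x_acquire_hw_lock() below, this variant makes a single
 * attempt and never sleeps.
 */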


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct bnx2x *bp = netdev_priv(dev_instance);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;
        int i;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                mask = 0x2 << fp->sb_id;
                if (status & mask) {
                        /* Handle Rx and Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                        prefetch(&fp->status_blk->u_status_block.
                                                status_block_index);
                        prefetch(fp->tx_cons_sb);
                        prefetch(&fp->status_blk->c_status_block.
                                                status_block_index);
                        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
                }
        }

#ifdef BCM_CNIC
        mask = 0x2 << CNIC_SB_ID(bp);
        if (status & (mask | 0x1)) {
                struct cnic_ops *c_ops = NULL;

                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();

                status &= ~mask;
        }
#endif

        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status))
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        return IRQ_HANDLED;
}
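
/*
 * Editor's note on the status word decoded above: bit 0 signals the
 * default (slowpath) status block and is handed to the sp_task, while each
 * fastpath status block contributes bit (1 + sb_id), i.e. the
 * "0x2 << fp->sb_id" masks that trigger NAPI scheduling; whatever remains
 * afterwards is reported as an unknown interrupt.
 */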

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 5 seconds every 5ms */
        for (cnt = 0; cnt < 1000; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, hw_lock_control_reg, resource_bit);
        return 0;
}
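
/*
 * Illustrative pairing (editor's sketch, not from the original source):
 *
 *	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... exclusive access across PCI functions ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * The lock lives in the MISC block's driver-control registers, so it is
 * honored by every driver instance (function) on the chip, not just this
 * one.
 */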


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;
        int value;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        /* read GPIO value */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO);

        /* get the requested pin value */
        if ((gpio_reg & gpio_mask) == gpio_mask)
                value = 1;
        else
                value = 0;

        DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

        return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}
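
/*
 * Illustrative usage (editor's sketch, not from the original source):
 * drive a pin low, then release it to high impedance:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 *
 * The HW lock taken inside serializes GPIO updates between the two ports.
 */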

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO int */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
                DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
                                   "output low\n", gpio_num, gpio_shift);
                /* clear SET and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
                DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
                                   "output high\n", gpio_num, gpio_shift);
                /* clear CLR and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
        switch (bp->link_vars.ieee_fc &
                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
                bp->port.advertising |= (ADVERTISED_Asym_Pause |
                                         ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
                bp->port.advertising |= ADVERTISED_Asym_Pause;
                break;

        default:
                bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        }
}


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
        if (!BP_NOMCP(bp)) {
                u8 rc;

                /* Initialize link parameters structure variables */
                /* It is recommended to turn off RX FC for jumbo frames
                   for better performance */
                if (bp->dev->mtu > 5000)
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
                else
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

                bnx2x_acquire_phy_lock(bp);

                if (load_mode == LOAD_DIAG)
                        bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);

                if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
                        bnx2x_link_report(bp);
                }

                return rc;
        }
        BNX2X_ERR("Bootcode is missing - can not initialize link\n");
        return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
        u8 rc = 0;

        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not test link\n");

        return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
        u32 r_param = bp->link_vars.line_speed / 8;
        u32 fair_periodic_timeout_usec;
        u32 t_fair;

        memset(&(bp->cmng.rs_vars), 0,
               sizeof(struct rate_shaping_vars_per_port));
        memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

        /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
        bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

        /* this is the threshold below which no timer arming will occur
           1.25 coefficient is for the threshold to be a little bigger
           than the real time, to compensate for timer inaccuracy */
        bp->cmng.rs_vars.rs_threshold =
                                (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

        /* resolution of fairness timer */
        fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
        /* for 10G it is 1000usec. for 1G it is 10000usec. */
        t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

        /* this is the threshold below which we won't arm the timer anymore */
        bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

        /* we multiply by 1e3/8 to get bytes/msec.
           We don't want the credits to pass a credit
           of the t_fair*FAIR_MEM (algorithm resolution) */
        bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
        /* since each tick is 4 usec */
        bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
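
/*
 * Worked numbers (editor's sketch) for a 10G link, using only the
 * relations above: line_speed is in Mbps (1 Mbps = 1 bit/usec), so
 * r_param = 10000/8 = 1250 bytes per usec; per the comment above,
 * t_fair = 1000 usec; and rs_threshold =
 * RS_PERIODIC_TIMEOUT_USEC * 1250 * 1.25 bytes, i.e. 25% above the
 * traffic one rate-shaping period can carry at line rate.
 */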

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
        int all_zero = 1;
        int port = BP_PORT(bp);
        int vn;

        bp->vn_weight_sum = 0;
        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
                int func = 2*vn + port;
                u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

                /* Skip hidden vns */
                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
                        continue;

                /* If min rate is zero - set it to 1 */
                if (!vn_min_rate)
                        vn_min_rate = DEF_MIN_RATE;
                else
                        all_zero = 0;

                bp->vn_weight_sum += vn_min_rate;
        }

        /* ... only if all min rates are zeros - disable fairness */
        if (all_zero) {
                bp->cmng.flags.cmng_enables &=
                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
                DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
                   "  fairness will be disabled\n");
        } else
                bp->cmng.flags.cmng_enables |=
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1407
1408 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1409 {
1410         struct rate_shaping_vars_per_vn m_rs_vn;
1411         struct fairness_vars_per_vn m_fair_vn;
1412         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1413         u16 vn_min_rate, vn_max_rate;
1414         int i;
1415
1416         /* If function is hidden - set min and max to zeroes */
1417         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1418                 vn_min_rate = 0;
1419                 vn_max_rate = 0;
1420
1421         } else {
1422                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1423                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1424                 /* If min rate is zero - set it to 1 */
1425                 if (!vn_min_rate)
1426                         vn_min_rate = DEF_MIN_RATE;
1427                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1428                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1429         }
1430         DP(NETIF_MSG_IFUP,
1431            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1432            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1433
1434         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1435         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1436
1437         /* global vn counter - maximal Mbps for this vn */
1438         m_rs_vn.vn_counter.rate = vn_max_rate;
1439
1440         /* quota - number of bytes transmitted in this period */
1441         m_rs_vn.vn_counter.quota =
1442                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1443
1444         if (bp->vn_weight_sum) {
1445                 /* credit for each period of the fairness algorithm:
1446                    number of bytes in T_FAIR (the vns share the port rate).
1447                    vn_weight_sum should not be larger than 10000, thus
1448                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1449                    than zero */
1450                 m_fair_vn.vn_credit_delta =
1451                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1452                                                    (8 * bp->vn_weight_sum))),
1453                               (bp->cmng.fair_vars.fair_threshold * 2));
1454                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1455                    m_fair_vn.vn_credit_delta);
1456         }
1457
1458         /* Store it to internal memory */
1459         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1460                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1461                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1462                        ((u32 *)(&m_rs_vn))[i]);
1463
1464         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1465                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1466                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1467                        ((u32 *)(&m_fair_vn))[i]);
1468 }
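
/* Units sketch (illustrative; assumes RS_PERIODIC_TIMEOUT_USEC == 400):
 * vn_max_rate is in Mbps, i.e. bits/usec, so the quota above is bytes
 * per rate-shaping period - e.g. a 10000 Mbps vn would get
 * 10000 * 400 / 8 = 500000 bytes per period.
 */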
1469
1470
1471 /* This function is called upon link interrupt */
1472 static void bnx2x_link_attn(struct bnx2x *bp)
1473 {
1474         u32 prev_link_status = bp->link_vars.link_status;
1475         /* Make sure that we are synced with the current statistics */
1476         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1477
1478         bnx2x_link_update(&bp->link_params, &bp->link_vars);
1479
1480         if (bp->link_vars.link_up) {
1481
1482                 /* dropless flow control */
1483                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1484                         int port = BP_PORT(bp);
1485                         u32 pause_enabled = 0;
1486
1487                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1488                                 pause_enabled = 1;
1489
1490                         REG_WR(bp, BAR_USTRORM_INTMEM +
1491                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1492                                pause_enabled);
1493                 }
1494
1495                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1496                         struct host_port_stats *pstats;
1497
1498                         pstats = bnx2x_sp(bp, port_stats);
1499                         /* reset old bmac stats */
1500                         memset(&(pstats->mac_stx[0]), 0,
1501                                sizeof(struct mac_stx));
1502                 }
1503                 if (bp->state == BNX2X_STATE_OPEN)
1504                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1505         }
1506
1507         /* indicate link status only if link status actually changed */
1508         if (prev_link_status != bp->link_vars.link_status)
1509                 bnx2x_link_report(bp);
1510
1511         if (IS_E1HMF(bp)) {
1512                 int port = BP_PORT(bp);
1513                 int func;
1514                 int vn;
1515
1516                 /* Set the attention towards other drivers on the same port */
1517                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1518                         if (vn == BP_E1HVN(bp))
1519                                 continue;
1520
1521                         func = ((vn << 1) | port);
1522                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1523                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1524                 }
1525
1526                 if (bp->link_vars.link_up) {
1527                         int i;
1528
1529                         /* Init rate shaping and fairness contexts */
1530                         bnx2x_init_port_minmax(bp);
1531
1532                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1533                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1534
1535                         /* Store it to internal memory */
1536                         for (i = 0;
1537                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
1538                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1539                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1540                                        ((u32 *)(&bp->cmng))[i]);
1541                 }
1542         }
1543 }
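
/* Function-numbering sketch: func == (vn << 1) | port, so on port 1 the
 * four E1H vns map to functions 1, 3, 5 and 7; the loop above pokes a
 * LINK_SYNC general attention at each of them except our own vn.
 */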
1544
1545 void bnx2x__link_status_update(struct bnx2x *bp)
1546 {
1547         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1548                 return;
1549
1550         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1551
1552         if (bp->link_vars.link_up)
1553                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1554         else
1555                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1556
1557         bnx2x_calc_vn_weight_sum(bp);
1558
1559         /* indicate link status */
1560         bnx2x_link_report(bp);
1561 }
1562
1563 static void bnx2x_pmf_update(struct bnx2x *bp)
1564 {
1565         int port = BP_PORT(bp);
1566         u32 val;
1567
1568         bp->port.pmf = 1;
1569         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1570
1571         /* enable nig attention */
1572         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1573         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1574         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1575
1576         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1577 }
1578
1579 /* end of Link */
1580
1581 /* slow path */
1582
1583 /*
1584  * General service functions
1585  */
1586
1587 /* send the MCP a request, block until there is a reply */
1588 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1589 {
1590         int func = BP_FUNC(bp);
1591         u32 seq = ++bp->fw_seq;
1592         u32 rc = 0;
1593         u32 cnt = 1;
1594         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1595
1596         mutex_lock(&bp->fw_mb_mutex);
1597         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1598         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1599
1600         do {
1601                 /* let the FW do its magic ... */
1602                 msleep(delay);
1603
1604                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1605
1606                 /* Give the FW up to 5 seconds (500*10ms) */
1607         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1608
1609         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1610            cnt*delay, rc, seq);
1611
1612         /* is this a reply to our command? */
1613         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1614                 rc &= FW_MSG_CODE_MASK;
1615         else {
1616                 /* FW BUG! */
1617                 BNX2X_ERR("FW failed to respond!\n");
1618                 bnx2x_fw_dump(bp);
1619                 rc = 0;
1620         }
1621         mutex_unlock(&bp->fw_mb_mutex);
1622
1623         return rc;
1624 }
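
/* Usage sketch: the caller passes a DRV_MSG_CODE_* value and treats a
 * return of 0 as "FW never echoed our sequence number", e.g.
 *
 *        rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *        if (!rc)
 *                (handle the FW timeout)
 */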
1625
1626 static void bnx2x_e1h_disable(struct bnx2x *bp)
1627 {
1628         int port = BP_PORT(bp);
1629
1630         netif_tx_disable(bp->dev);
1631
1632         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1633
1634         netif_carrier_off(bp->dev);
1635 }
1636
1637 static void bnx2x_e1h_enable(struct bnx2x *bp)
1638 {
1639         int port = BP_PORT(bp);
1640
1641         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1642
1643         /* Tx queues only need to be re-enabled */
1644         netif_tx_wake_all_queues(bp->dev);
1645
1646         /*
1647          * Do not call netif_carrier_on here; it will be called when the
1648          * link state is checked and the link is up.
1649          */
1650 }
1651
1652 static void bnx2x_update_min_max(struct bnx2x *bp)
1653 {
1654         int port = BP_PORT(bp);
1655         int vn, i;
1656
1657         /* Init rate shaping and fairness contexts */
1658         bnx2x_init_port_minmax(bp);
1659
1660         bnx2x_calc_vn_weight_sum(bp);
1661
1662         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1663                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1664
1665         if (bp->port.pmf) {
1666                 int func;
1667
1668                 /* Set the attention towards other drivers on the same port */
1669                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1670                         if (vn == BP_E1HVN(bp))
1671                                 continue;
1672
1673                         func = ((vn << 1) | port);
1674                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1675                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1676                 }
1677
1678                 /* Store it to internal memory */
1679                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1680                         REG_WR(bp, BAR_XSTRORM_INTMEM +
1681                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1682                                ((u32 *)(&bp->cmng))[i]);
1683         }
1684 }
1685
1686 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1687 {
1688         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1689
1690         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1691
1692                 /*
1693                  * This is the only place besides function initialization
1694                  * where bp->flags can change, so it is done without any
1695                  * locks.
1696                  */
1697                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1698                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1699                         bp->flags |= MF_FUNC_DIS;
1700
1701                         bnx2x_e1h_disable(bp);
1702                 } else {
1703                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1704                         bp->flags &= ~MF_FUNC_DIS;
1705
1706                         bnx2x_e1h_enable(bp);
1707                 }
1708                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1709         }
1710         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1711
1712                 bnx2x_update_min_max(bp);
1713                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1714         }
1715
1716         /* Report results to MCP */
1717         if (dcc_event)
1718                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
1719         else
1720                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1721 }
1722
1723 /* must be called under the spq lock */
1724 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1725 {
1726         struct eth_spe *next_spe = bp->spq_prod_bd;
1727
1728         if (bp->spq_prod_bd == bp->spq_last_bd) {
1729                 bp->spq_prod_bd = bp->spq;
1730                 bp->spq_prod_idx = 0;
1731                 DP(NETIF_MSG_TIMER, "end of spq\n");
1732         } else {
1733                 bp->spq_prod_bd++;
1734                 bp->spq_prod_idx++;
1735         }
1736         return next_spe;
1737 }
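
/* Wrap-around sketch: the BD returned is always the one spq_prod_bd
 * pointed at on entry; when that was the last BD in the ring, the
 * producer is rewound to the base of the ring (and spq_prod_idx to 0)
 * for the following request, otherwise both simply advance by one.
 */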
1738
1739 /* must be called under the spq lock */
1740 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1741 {
1742         int func = BP_FUNC(bp);
1743
1744         /* Make sure that BD data is updated before writing the producer */
1745         wmb();
1746
1747         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1748                bp->spq_prod_idx);
1749         mmiowb();
1750 }
1751
1752 /* the slow path queue is odd since completions arrive on the fastpath ring */
1753 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1754                          u32 data_hi, u32 data_lo, int common)
1755 {
1756         struct eth_spe *spe;
1757
1758 #ifdef BNX2X_STOP_ON_ERROR
1759         if (unlikely(bp->panic))
1760                 return -EIO;
1761 #endif
1762
1763         spin_lock_bh(&bp->spq_lock);
1764
1765         if (!bp->spq_left) {
1766                 BNX2X_ERR("BUG! SPQ ring full!\n");
1767                 spin_unlock_bh(&bp->spq_lock);
1768                 bnx2x_panic();
1769                 return -EBUSY;
1770         }
1771
1772         spe = bnx2x_sp_get_next(bp);
1773
1774         /* CID needs the port number to be encoded in it */
1775         spe->hdr.conn_and_cmd_data =
1776                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1777                                     HW_CID(bp, cid));
1778         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1779         if (common)
1780                 spe->hdr.type |=
1781                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1782
1783         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1784         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1785
1786         bp->spq_left--;
1787
1788         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1789            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
1790            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1791            (u32)(U64_LO(bp->spq_mapping) +
1792            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1793            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1794
1795         bnx2x_sp_prod_update(bp);
1796         spin_unlock_bh(&bp->spq_lock);
1797         return 0;
1798 }
1799
1800 /* acquire split MCP access lock register */
1801 static int bnx2x_acquire_alr(struct bnx2x *bp)
1802 {
1803         u32 j, val;
1804         int rc = 0;
1805
1806         might_sleep();
1807         for (j = 0; j < 1000; j++) {
1808                 val = (1UL << 31);
1809                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1810                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1811                 if (val & (1UL << 31))
1812                         break;
1813
1814                 msleep(5);
1815         }
1816         if (!(val & (1UL << 31))) {
1817                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1818                 rc = -EBUSY;
1819         }
1820
1821         return rc;
1822 }
1823
1824 /* release split MCP access lock register */
1825 static void bnx2x_release_alr(struct bnx2x *bp)
1826 {
1827         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1828 }
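
/* Lock protocol sketch (as implemented above): writing bit 31 of the
 * GRC register at GRCBASE_MCP + 0x9c requests the lock, reading the bit
 * back as set means we own it, and writing 0 releases it.  The acquire
 * retries up to 1000 times with a 5 ms sleep, i.e. roughly a 5 second
 * timeout.
 */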
1829
1830 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1831 {
1832         struct host_def_status_block *def_sb = bp->def_status_blk;
1833         u16 rc = 0;
1834
1835         barrier(); /* status block is written to by the chip */
1836         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1837                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1838                 rc |= 1;
1839         }
1840         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1841                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1842                 rc |= 2;
1843         }
1844         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1845                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1846                 rc |= 4;
1847         }
1848         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1849                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1850                 rc |= 8;
1851         }
1852         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1853                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1854                 rc |= 16;
1855         }
1856         return rc;
1857 }
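
/* The returned mask encodes which indices moved: bit 0 - attention,
 * bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 * bnx2x_sp_task() below consumes bit 0 for HW attentions and bit 1 for
 * CStorm STAT_QUERY events.
 */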
1858
1859 /*
1860  * slow path service functions
1861  */
1862
1863 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1864 {
1865         int port = BP_PORT(bp);
1866         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1867                        COMMAND_REG_ATTN_BITS_SET);
1868         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1869                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
1870         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1871                                        NIG_REG_MASK_INTERRUPT_PORT0;
1872         u32 aeu_mask;
1873         u32 nig_mask = 0;
1874
1875         if (bp->attn_state & asserted)
1876                 BNX2X_ERR("IGU ERROR\n");
1877
1878         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1879         aeu_mask = REG_RD(bp, aeu_addr);
1880
1881         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
1882            aeu_mask, asserted);
1883         aeu_mask &= ~(asserted & 0x3ff);
1884         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1885
1886         REG_WR(bp, aeu_addr, aeu_mask);
1887         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1888
1889         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1890         bp->attn_state |= asserted;
1891         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1892
1893         if (asserted & ATTN_HARD_WIRED_MASK) {
1894                 if (asserted & ATTN_NIG_FOR_FUNC) {
1895
1896                         bnx2x_acquire_phy_lock(bp);
1897
1898                         /* save nig interrupt mask */
1899                         nig_mask = REG_RD(bp, nig_int_mask_addr);
1900                         REG_WR(bp, nig_int_mask_addr, 0);
1901
1902                         bnx2x_link_attn(bp);
1903
1904                         /* handle unicore attn? */
1905                 }
1906                 if (asserted & ATTN_SW_TIMER_4_FUNC)
1907                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1908
1909                 if (asserted & GPIO_2_FUNC)
1910                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1911
1912                 if (asserted & GPIO_3_FUNC)
1913                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1914
1915                 if (asserted & GPIO_4_FUNC)
1916                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1917
1918                 if (port == 0) {
1919                         if (asserted & ATTN_GENERAL_ATTN_1) {
1920                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1921                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1922                         }
1923                         if (asserted & ATTN_GENERAL_ATTN_2) {
1924                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1925                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1926                         }
1927                         if (asserted & ATTN_GENERAL_ATTN_3) {
1928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1930                         }
1931                 } else {
1932                         if (asserted & ATTN_GENERAL_ATTN_4) {
1933                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1934                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1935                         }
1936                         if (asserted & ATTN_GENERAL_ATTN_5) {
1937                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1938                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1939                         }
1940                         if (asserted & ATTN_GENERAL_ATTN_6) {
1941                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1942                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1943                         }
1944                 }
1945
1946         } /* if hardwired */
1947
1948         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1949            asserted, hc_addr);
1950         REG_WR(bp, hc_addr, asserted);
1951
1952         /* now set back the mask */
1953         if (asserted & ATTN_NIG_FOR_FUNC) {
1954                 REG_WR(bp, nig_int_mask_addr, nig_mask);
1955                 bnx2x_release_phy_lock(bp);
1956         }
1957 }
1958
1959 static inline void bnx2x_fan_failure(struct bnx2x *bp)
1960 {
1961         int port = BP_PORT(bp);
1962
1963         /* mark the failure */
1964         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1965         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1966         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1967                  bp->link_params.ext_phy_config);
1968
1969         /* log the failure */
1970         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1971                " the driver to shut down the card to prevent permanent"
1972                " damage.  Please contact OEM Support for assistance\n");
1973 }
1974
1975 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1976 {
1977         int port = BP_PORT(bp);
1978         int reg_offset;
1979         u32 val, swap_val, swap_override;
1980
1981         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1982                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1983
1984         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1985
1986                 val = REG_RD(bp, reg_offset);
1987                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1988                 REG_WR(bp, reg_offset, val);
1989
1990                 BNX2X_ERR("SPIO5 hw attention\n");
1991
1992                 /* Fan failure attention */
1993                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1994                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1995                         /* Low power mode is controlled by GPIO 2 */
1996                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1997                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1998                         /* The PHY reset is controlled by GPIO 1 */
1999                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2000                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2001                         break;
2002
2003                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2004                         /* The PHY reset is controlled by GPIO 1 */
2005                         /* fake the port number to cancel the swap done in
2006                            set_gpio() */
2007                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2008                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2009                         port = (swap_val && swap_override) ^ 1;
2010                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2011                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2012                         break;
2013
2014                 default:
2015                         break;
2016                 }
2017                 bnx2x_fan_failure(bp);
2018         }
2019
2020         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2021                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2022                 bnx2x_acquire_phy_lock(bp);
2023                 bnx2x_handle_module_detect_int(&bp->link_params);
2024                 bnx2x_release_phy_lock(bp);
2025         }
2026
2027         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2028
2029                 val = REG_RD(bp, reg_offset);
2030                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2031                 REG_WR(bp, reg_offset, val);
2032
2033                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2034                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2035                 bnx2x_panic();
2036         }
2037 }
2038
2039 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2040 {
2041         u32 val;
2042
2043         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2044
2045                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2046                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2047                 /* DORQ discard attention */
2048                 if (val & 0x2)
2049                         BNX2X_ERR("FATAL error from DORQ\n");
2050         }
2051
2052         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2053
2054                 int port = BP_PORT(bp);
2055                 int reg_offset;
2056
2057                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2058                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2059
2060                 val = REG_RD(bp, reg_offset);
2061                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2062                 REG_WR(bp, reg_offset, val);
2063
2064                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2065                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2066                 bnx2x_panic();
2067         }
2068 }
2069
2070 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2071 {
2072         u32 val;
2073
2074         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2075
2076                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2077                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2078                 /* CFC error attention */
2079                 if (val & 0x2)
2080                         BNX2X_ERR("FATAL error from CFC\n");
2081         }
2082
2083         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2084
2085                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2086                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2087                 /* RQ_USDMDP_FIFO_OVERFLOW */
2088                 if (val & 0x18000)
2089                         BNX2X_ERR("FATAL error from PXP\n");
2090         }
2091
2092         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2093
2094                 int port = BP_PORT(bp);
2095                 int reg_offset;
2096
2097                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2098                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2099
2100                 val = REG_RD(bp, reg_offset);
2101                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2102                 REG_WR(bp, reg_offset, val);
2103
2104                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2105                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2106                 bnx2x_panic();
2107         }
2108 }
2109
2110 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2111 {
2112         u32 val;
2113
2114         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2115
2116                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2117                         int func = BP_FUNC(bp);
2118
2119                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2120                         bp->mf_config = SHMEM_RD(bp,
2121                                            mf_cfg.func_mf_config[func].config);
2122                         val = SHMEM_RD(bp, func_mb[func].drv_status);
2123                         if (val & DRV_STATUS_DCC_EVENT_MASK)
2124                                 bnx2x_dcc_event(bp,
2125                                             (val & DRV_STATUS_DCC_EVENT_MASK));
2126                         bnx2x__link_status_update(bp);
2127                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2128                                 bnx2x_pmf_update(bp);
2129
2130                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2131
2132                         BNX2X_ERR("MC assert!\n");
2133                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2134                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2135                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2136                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2137                         bnx2x_panic();
2138
2139                 } else if (attn & BNX2X_MCP_ASSERT) {
2140
2141                         BNX2X_ERR("MCP assert!\n");
2142                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2143                         bnx2x_fw_dump(bp);
2144
2145                 } else
2146                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2147         }
2148
2149         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2150                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2151                 if (attn & BNX2X_GRC_TIMEOUT) {
2152                         val = CHIP_IS_E1H(bp) ?
2153                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2154                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2155                 }
2156                 if (attn & BNX2X_GRC_RSV) {
2157                         val = CHIP_IS_E1H(bp) ?
2158                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2159                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2160                 }
2161                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2162         }
2163 }
2164
2165 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
2166 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
2167 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2168 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
2169 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
2170 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
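
/* Layout sketch for BNX2X_MISC_GEN_REG as used below: bits 15:0 hold
 * the load counter (LOAD_COUNTER_MASK == 0x0000ffff) and bit 16
 * (RESET_DONE_FLAG_SHIFT) marks a reset in progress, so e.g. a raw
 * value of 0x00010002 reads as "reset in progress, two functions
 * loaded".
 */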
2171 /*
2172  * should be run under rtnl lock
2173  */
2174 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2175 {
2176         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2177         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2178         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2179         barrier();
2180         mmiowb();
2181 }
2182
2183 /*
2184  * should be run under rtnl lock
2185  */
2186 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2187 {
2188         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2189         val |= (1 << RESET_DONE_FLAG_SHIFT);
2190         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2191         barrier();
2192         mmiowb();
2193 }
2194
2195 /*
2196  * should be run under rtnl lock
2197  */
2198 bool bnx2x_reset_is_done(struct bnx2x *bp)
2199 {
2200         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2201         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2202         return !(val & RESET_DONE_FLAG_MASK);
2203 }
2204
2205 /*
2206  * should be run under rtnl lock
2207  */
2208 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2209 {
2210         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2211
2212         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2213
2214         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2215         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2216         barrier();
2217         mmiowb();
2218 }
2219
2220 /*
2221  * should be run under rtnl lock
2222  */
2223 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2224 {
2225         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2226
2227         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2228
2229         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2230         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2231         barrier();
2232         mmiowb();
2233
2234         return val1;
2235 }
2236
2237 /*
2238  * should be run under rtnl lock
2239  */
2240 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2241 {
2242         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2243 }
2244
2245 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2246 {
2247         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2248         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2249 }
2250
2251 static inline void _print_next_block(int idx, const char *blk)
2252 {
2253         if (idx)
2254                 pr_cont(", ");
2255         pr_cont("%s", blk);
2256 }
2257
2258 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2259 {
2260         int i = 0;
2261         u32 cur_bit = 0;
2262         for (i = 0; sig; i++) {
2263                 cur_bit = ((u32)0x1 << i);
2264                 if (sig & cur_bit) {
2265                         switch (cur_bit) {
2266                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2267                                 _print_next_block(par_num++, "BRB");
2268                                 break;
2269                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2270                                 _print_next_block(par_num++, "PARSER");
2271                                 break;
2272                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2273                                 _print_next_block(par_num++, "TSDM");
2274                                 break;
2275                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2276                                 _print_next_block(par_num++, "SEARCHER");
2277                                 break;
2278                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2279                                 _print_next_block(par_num++, "TSEMI");
2280                                 break;
2281                         }
2282
2283                         /* Clear the bit */
2284                         sig &= ~cur_bit;
2285                 }
2286         }
2287
2288         return par_num;
2289 }
2290
2291 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2292 {
2293         int i = 0;
2294         u32 cur_bit = 0;
2295         for (i = 0; sig; i++) {
2296                 cur_bit = ((u32)0x1 << i);
2297                 if (sig & cur_bit) {
2298                         switch (cur_bit) {
2299                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2300                                 _print_next_block(par_num++, "PBCLIENT");
2301                                 break;
2302                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2303                                 _print_next_block(par_num++, "QM");
2304                                 break;
2305                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2306                                 _print_next_block(par_num++, "XSDM");
2307                                 break;
2308                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2309                                 _print_next_block(par_num++, "XSEMI");
2310                                 break;
2311                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2312                                 _print_next_block(par_num++, "DOORBELLQ");
2313                                 break;
2314                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2315                                 _print_next_block(par_num++, "VAUX PCI CORE");
2316                                 break;
2317                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2318                                 _print_next_block(par_num++, "DEBUG");
2319                                 break;
2320                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2321                                 _print_next_block(par_num++, "USDM");
2322                                 break;
2323                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2324                                 _print_next_block(par_num++, "USEMI");
2325                                 break;
2326                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2327                                 _print_next_block(par_num++, "UPB");
2328                                 break;
2329                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2330                                 _print_next_block(par_num++, "CSDM");
2331                                 break;
2332                         }
2333
2334                         /* Clear the bit */
2335                         sig &= ~cur_bit;
2336                 }
2337         }
2338
2339         return par_num;
2340 }
2341
2342 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2343 {
2344         int i = 0;
2345         u32 cur_bit = 0;
2346         for (i = 0; sig; i++) {
2347                 cur_bit = ((u32)0x1 << i);
2348                 if (sig & cur_bit) {
2349                         switch (cur_bit) {
2350                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2351                                 _print_next_block(par_num++, "CSEMI");
2352                                 break;
2353                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2354                                 _print_next_block(par_num++, "PXP");
2355                                 break;
2356                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2357                                 _print_next_block(par_num++,
2358                                         "PXPPCICLOCKCLIENT");
2359                                 break;
2360                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2361                                 _print_next_block(par_num++, "CFC");
2362                                 break;
2363                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2364                                 _print_next_block(par_num++, "CDU");
2365                                 break;
2366                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2367                                 _print_next_block(par_num++, "IGU");
2368                                 break;
2369                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2370                                 _print_next_block(par_num++, "MISC");
2371                                 break;
2372                         }
2373
2374                         /* Clear the bit */
2375                         sig &= ~cur_bit;
2376                 }
2377         }
2378
2379         return par_num;
2380 }
2381
2382 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2383 {
2384         int i = 0;
2385         u32 cur_bit = 0;
2386         for (i = 0; sig; i++) {
2387                 cur_bit = ((u32)0x1 << i);
2388                 if (sig & cur_bit) {
2389                         switch (cur_bit) {
2390                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2391                                 _print_next_block(par_num++, "MCP ROM");
2392                                 break;
2393                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2394                                 _print_next_block(par_num++, "MCP UMP RX");
2395                                 break;
2396                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2397                                 _print_next_block(par_num++, "MCP UMP TX");
2398                                 break;
2399                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2400                                 _print_next_block(par_num++, "MCP SCPAD");
2401                                 break;
2402                         }
2403
2404                         /* Clear the bit */
2405                         sig &= ~cur_bit;
2406                 }
2407         }
2408
2409         return par_num;
2410 }
2411
2412 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2413                                      u32 sig2, u32 sig3)
2414 {
2415         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2416             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2417                 int par_num = 0;
2418                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
2419                         "[0]:0x%08x [1]:0x%08x "
2420                         "[2]:0x%08x [3]:0x%08x\n",
2421                           sig0 & HW_PRTY_ASSERT_SET_0,
2422                           sig1 & HW_PRTY_ASSERT_SET_1,
2423                           sig2 & HW_PRTY_ASSERT_SET_2,
2424                           sig3 & HW_PRTY_ASSERT_SET_3);
2425                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
2426                        bp->dev->name);
2427                 par_num = bnx2x_print_blocks_with_parity0(
2428                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2429                 par_num = bnx2x_print_blocks_with_parity1(
2430                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2431                 par_num = bnx2x_print_blocks_with_parity2(
2432                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2433                 par_num = bnx2x_print_blocks_with_parity3(
2434                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2435                 pr_cont("\n");
2436                 return true;
2437         } else
2438                 return false;
2439 }
2440
2441 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2442 {
2443         struct attn_route attn;
2444         int port = BP_PORT(bp);
2445
2446         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2447         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2448         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2449         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2458
2459         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2460                                         attn.sig[3]);
2461 }
2462
2463 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2464 {
2465         struct attn_route attn, *group_mask;
2466         int port = BP_PORT(bp);
2467         int index;
2468         u32 reg_addr;
2469         u32 val;
2470         u32 aeu_mask;
2471
2472         /* need to take HW lock because MCP or other port might also
2473            try to handle this event */
2474         bnx2x_acquire_alr(bp);
2475
2476         if (bnx2x_chk_parity_attn(bp)) {
2477                 bp->recovery_state = BNX2X_RECOVERY_INIT;
2478                 bnx2x_set_reset_in_progress(bp);
2479                 schedule_delayed_work(&bp->reset_task, 0);
2480                 /* Disable HW interrupts */
2481                 bnx2x_int_disable(bp);
2482                 bnx2x_release_alr(bp);
2483                 /* In case of parity errors don't handle attentions so that
2484                  * the other function can also "see" the parity errors.
2485                  */
2486                 return;
2487         }
2488
2489         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2490         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2491         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2492         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2493         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2494            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2495
2496         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2497                 if (deasserted & (1 << index)) {
2498                         group_mask = &bp->attn_group[index];
2499
2500                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2501                            index, group_mask->sig[0], group_mask->sig[1],
2502                            group_mask->sig[2], group_mask->sig[3]);
2503
2504                         bnx2x_attn_int_deasserted3(bp,
2505                                         attn.sig[3] & group_mask->sig[3]);
2506                         bnx2x_attn_int_deasserted1(bp,
2507                                         attn.sig[1] & group_mask->sig[1]);
2508                         bnx2x_attn_int_deasserted2(bp,
2509                                         attn.sig[2] & group_mask->sig[2]);
2510                         bnx2x_attn_int_deasserted0(bp,
2511                                         attn.sig[0] & group_mask->sig[0]);
2512                 }
2513         }
2514
2515         bnx2x_release_alr(bp);
2516
2517         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2518
2519         val = ~deasserted;
2520         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2521            val, reg_addr);
2522         REG_WR(bp, reg_addr, val);
2523
2524         if (~bp->attn_state & deasserted)
2525                 BNX2X_ERR("IGU ERROR\n");
2526
2527         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2528                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2529
2530         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2531         aeu_mask = REG_RD(bp, reg_addr);
2532
2533         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2534            aeu_mask, deasserted);
2535         aeu_mask |= (deasserted & 0x3ff);
2536         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2537
2538         REG_WR(bp, reg_addr, aeu_mask);
2539         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540
2541         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2542         bp->attn_state &= ~deasserted;
2543         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2544 }
2545
2546 static void bnx2x_attn_int(struct bnx2x *bp)
2547 {
2548         /* read local copy of bits */
2549         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2550                                                                 attn_bits);
2551         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2552                                                                 attn_bits_ack);
2553         u32 attn_state = bp->attn_state;
2554
2555         /* look for changed bits */
2556         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2557         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
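
        /* e.g. for one attention line: bits=1/ack=0/state=0 means newly
         * raised -> asserted; bits=0/ack=1/state=1 means just cleared ->
         * deasserted; bits equal to ack while bits differ from state is
         * inconsistent and is flagged as BAD below.
         */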
2558
2559         DP(NETIF_MSG_HW,
2560            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2561            attn_bits, attn_ack, asserted, deasserted);
2562
2563         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2564                 BNX2X_ERR("BAD attention state\n");
2565
2566         /* handle bits that were raised */
2567         if (asserted)
2568                 bnx2x_attn_int_asserted(bp, asserted);
2569
2570         if (deasserted)
2571                 bnx2x_attn_int_deasserted(bp, deasserted);
2572 }
2573
2574 static void bnx2x_sp_task(struct work_struct *work)
2575 {
2576         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2577         u16 status;
2578
2579         /* Return here if interrupt is disabled */
2580         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2581                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2582                 return;
2583         }
2584
2585         status = bnx2x_update_dsb_idx(bp);
2586 /*      if (status == 0)                                     */
2587 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2588
2589         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2590
2591         /* HW attentions */
2592         if (status & 0x1) {
2593                 bnx2x_attn_int(bp);
2594                 status &= ~0x1;
2595         }
2596
2597         /* CStorm events: STAT_QUERY */
2598         if (status & 0x2) {
2599                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2600                 status &= ~0x2;
2601         }
2602
2603         if (unlikely(status))
2604                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2605                    status);
2606
2607         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2608                      IGU_INT_NOP, 1);
2609         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2610                      IGU_INT_NOP, 1);
2611         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2612                      IGU_INT_NOP, 1);
2613         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2614                      IGU_INT_NOP, 1);
2615         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2616                      IGU_INT_ENABLE, 1);
2617 }
2618
2619 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2620 {
2621         struct net_device *dev = dev_instance;
2622         struct bnx2x *bp = netdev_priv(dev);
2623
2624         /* Return here if interrupt is disabled */
2625         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2626                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2627                 return IRQ_HANDLED;
2628         }
2629
2630         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2631
2632 #ifdef BNX2X_STOP_ON_ERROR
2633         if (unlikely(bp->panic))
2634                 return IRQ_HANDLED;
2635 #endif
2636
2637 #ifdef BCM_CNIC
2638         {
2639                 struct cnic_ops *c_ops;
2640
2641                 rcu_read_lock();
2642                 c_ops = rcu_dereference(bp->cnic_ops);
2643                 if (c_ops)
2644                         c_ops->cnic_handler(bp->cnic_data, NULL);
2645                 rcu_read_unlock();
2646         }
2647 #endif
2648         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2649
2650         return IRQ_HANDLED;
2651 }
2652
2653 /* end of slow path */
2654
2655 static void bnx2x_timer(unsigned long data)
2656 {
2657         struct bnx2x *bp = (struct bnx2x *) data;
2658
2659         if (!netif_running(bp->dev))
2660                 return;
2661
2662         if (atomic_read(&bp->intr_sem) != 0)
2663                 goto timer_restart;
2664
2665         if (poll) {
2666                 struct bnx2x_fastpath *fp = &bp->fp[0];
2667                 int rc;
2668
2669                 bnx2x_tx_int(fp);
2670                 rc = bnx2x_rx_int(fp, 1000);
2671         }
2672
2673         if (!BP_NOMCP(bp)) {
2674                 int func = BP_FUNC(bp);
2675                 u32 drv_pulse;
2676                 u32 mcp_pulse;
2677
2678                 ++bp->fw_drv_pulse_wr_seq;
2679                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2680                 /* TBD - add SYSTEM_TIME */
2681                 drv_pulse = bp->fw_drv_pulse_wr_seq;
2682                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2683
2684                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2685                              MCP_PULSE_SEQ_MASK);
2686                 /* The delta between driver pulse and mcp response
2687                  * should be 1 (before mcp response) or 0 (after mcp response)
2688                  */
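                /* e.g. with drv_pulse == 0x12, an mcp_pulse of 0x12 or
                 * 0x11 is healthy; anything else means a lost heartbeat.
                 */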
2689                 if ((drv_pulse != mcp_pulse) &&
2690                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2691                         /* someone lost a heartbeat... */
2692                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2693                                   drv_pulse, mcp_pulse);
2694                 }
2695         }
2696
2697         if (bp->state == BNX2X_STATE_OPEN)
2698                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2699
2700 timer_restart:
2701         mod_timer(&bp->timer, jiffies + bp->current_interval);
2702 }
2703
2704 /* end of Statistics */
2705
2706 /* nic init */
2707
2708 /*
2709  * nic init service functions
2710  */
2711
2712 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2713 {
2714         int port = BP_PORT(bp);
2715
2716         /* "CSTORM" */
2717         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2718                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2719                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2720         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2721                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2722                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2723 }
2724
2725 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2726                           dma_addr_t mapping, int sb_id)
2727 {
2728         int port = BP_PORT(bp);
2729         int func = BP_FUNC(bp);
2730         int index;
2731         u64 section;
2732
2733         /* USTORM */
2734         section = ((u64)mapping) + offsetof(struct host_status_block,
2735                                             u_status_block);
2736         sb->u_status_block.status_block_id = sb_id;
2737
2738         REG_WR(bp, BAR_CSTRORM_INTMEM +
2739                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2740         REG_WR(bp, BAR_CSTRORM_INTMEM +
2741                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2742                U64_HI(section));
2743         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2744                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2745
2746         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2747                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2748                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2749
2750         /* CSTORM */
2751         section = ((u64)mapping) + offsetof(struct host_status_block,
2752                                             c_status_block);
2753         sb->c_status_block.status_block_id = sb_id;
2754
2755         REG_WR(bp, BAR_CSTRORM_INTMEM +
2756                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2757         REG_WR(bp, BAR_CSTRORM_INTMEM +
2758                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2759                U64_HI(section));
2760         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2761                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2762
2763         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2764                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2765                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2766
2767         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2768 }
2769
2770 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2771 {
2772         int func = BP_FUNC(bp);
2773
2774         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2775                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2776                         sizeof(struct tstorm_def_status_block)/4);
2777         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2778                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2779                         sizeof(struct cstorm_def_status_block_u)/4);
2780         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2781                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2782                         sizeof(struct cstorm_def_status_block_c)/4);
2783         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2784                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2785                         sizeof(struct xstorm_def_status_block)/4);
2786 }
2787
2788 static void bnx2x_init_def_sb(struct bnx2x *bp,
2789                               struct host_def_status_block *def_sb,
2790                               dma_addr_t mapping, int sb_id)
2791 {
2792         int port = BP_PORT(bp);
2793         int func = BP_FUNC(bp);
2794         int index, val, reg_offset;
2795         u64 section;
2796
2797         /* ATTN */
2798         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2799                                             atten_status_block);
2800         def_sb->atten_status_block.status_block_id = sb_id;
2801
2802         bp->attn_state = 0;
2803
2804         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2805                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2806
2807         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2808                 bp->attn_group[index].sig[0] = REG_RD(bp,
2809                                                      reg_offset + 0x10*index);
2810                 bp->attn_group[index].sig[1] = REG_RD(bp,
2811                                                reg_offset + 0x4 + 0x10*index);
2812                 bp->attn_group[index].sig[2] = REG_RD(bp,
2813                                                reg_offset + 0x8 + 0x10*index);
2814                 bp->attn_group[index].sig[3] = REG_RD(bp,
2815                                                reg_offset + 0xc + 0x10*index);
2816         }
2817
2818         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2819                              HC_REG_ATTN_MSG0_ADDR_L);
2820
2821         REG_WR(bp, reg_offset, U64_LO(section));
2822         REG_WR(bp, reg_offset + 4, U64_HI(section));
2823
2824         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2825
2826         val = REG_RD(bp, reg_offset);
2827         val |= sb_id;
2828         REG_WR(bp, reg_offset, val);
2829
2830         /* USTORM */
2831         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2832                                             u_def_status_block);
2833         def_sb->u_def_status_block.status_block_id = sb_id;
2834
2835         REG_WR(bp, BAR_CSTRORM_INTMEM +
2836                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2837         REG_WR(bp, BAR_CSTRORM_INTMEM +
2838                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2839                U64_HI(section));
2840         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2841                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2842
2843         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2844                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2845                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2846
2847         /* CSTORM */
2848         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2849                                             c_def_status_block);
2850         def_sb->c_def_status_block.status_block_id = sb_id;
2851
2852         REG_WR(bp, BAR_CSTRORM_INTMEM +
2853                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2854         REG_WR(bp, BAR_CSTRORM_INTMEM +
2855                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2856                U64_HI(section));
2857         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2858                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2859
2860         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2861                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2862                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2863
2864         /* TSTORM */
2865         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2866                                             t_def_status_block);
2867         def_sb->t_def_status_block.status_block_id = sb_id;
2868
2869         REG_WR(bp, BAR_TSTRORM_INTMEM +
2870                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2871         REG_WR(bp, BAR_TSTRORM_INTMEM +
2872                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2873                U64_HI(section));
2874         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2875                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2876
2877         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2878                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2879                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2880
2881         /* XSTORM */
2882         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2883                                             x_def_status_block);
2884         def_sb->x_def_status_block.status_block_id = sb_id;
2885
2886         REG_WR(bp, BAR_XSTRORM_INTMEM +
2887                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2888         REG_WR(bp, BAR_XSTRORM_INTMEM +
2889                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2890                U64_HI(section));
2891         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2892                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2893
2894         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2895                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2896                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2897
2898         bp->stats_pending = 0;
2899         bp->set_mac_pending = 0;
2900
2901         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2902 }
2903
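/* Program the host-coalescing timeout of every queue's Rx and Tx
 * completion index; a timeout of 0 disables the index instead.
 */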
2904 void bnx2x_update_coalesce(struct bnx2x *bp)
2905 {
2906         int port = BP_PORT(bp);
2907         int i;
2908
2909         for_each_queue(bp, i) {
2910                 int sb_id = bp->fp[i].sb_id;
2911
2912                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2913                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2914                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2915                                                       U_SB_ETH_RX_CQ_INDEX),
2916                         bp->rx_ticks/(4 * BNX2X_BTR));
2917                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2918                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2919                                                        U_SB_ETH_RX_CQ_INDEX),
2920                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2921
2922                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2923                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2924                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2925                                                       C_SB_ETH_TX_CQ_INDEX),
2926                         bp->tx_ticks/(4 * BNX2X_BTR));
2927                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2928                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2929                                                        C_SB_ETH_TX_CQ_INDEX),
2930                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2931         }
2932 }
2933
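/* Initialize the slowpath (SP) ring: reset the producer and point
 * XSTORM at the SPQ page and the initial producer value.
 */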
2934 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2935 {
2936         int func = BP_FUNC(bp);
2937
2938         spin_lock_init(&bp->spq_lock);
2939
2940         bp->spq_left = MAX_SPQ_PENDING;
2941         bp->spq_prod_idx = 0;
2942         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2943         bp->spq_prod_bd = bp->spq;
2944         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2945
2946         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2947                U64_LO(bp->spq_mapping));
2948         REG_WR(bp,
2949                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2950                U64_HI(bp->spq_mapping));
2951
2952         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2953                bp->spq_prod_idx);
2954 }
2955
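/* Fill the per-queue ETH context: the Rx side (USTORM) gets the client
 * id, status block binding and BD/SGE page addresses along with the CDU
 * validation words, while the Tx side (CSTORM and XSTORM) gets the Tx
 * BD page addresses and statistics id.
 */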
2956 static void bnx2x_init_context(struct bnx2x *bp)
2957 {
2958         int i;
2959
2960         /* Rx */
2961         for_each_queue(bp, i) {
2962                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2963                 struct bnx2x_fastpath *fp = &bp->fp[i];
2964                 u8 cl_id = fp->cl_id;
2965
2966                 context->ustorm_st_context.common.sb_index_numbers =
2967                                                 BNX2X_RX_SB_INDEX_NUM;
2968                 context->ustorm_st_context.common.clientId = cl_id;
2969                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2970                 context->ustorm_st_context.common.flags =
2971                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2972                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2973                 context->ustorm_st_context.common.statistics_counter_id =
2974                                                 cl_id;
2975                 context->ustorm_st_context.common.mc_alignment_log_size =
2976                                                 BNX2X_RX_ALIGN_SHIFT;
2977                 context->ustorm_st_context.common.bd_buff_size =
2978                                                 bp->rx_buf_size;
2979                 context->ustorm_st_context.common.bd_page_base_hi =
2980                                                 U64_HI(fp->rx_desc_mapping);
2981                 context->ustorm_st_context.common.bd_page_base_lo =
2982                                                 U64_LO(fp->rx_desc_mapping);
2983                 if (!fp->disable_tpa) {
2984                         context->ustorm_st_context.common.flags |=
2985                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2986                         context->ustorm_st_context.common.sge_buff_size =
2987                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2988                                            0xffff);
2989                         context->ustorm_st_context.common.sge_page_base_hi =
2990                                                 U64_HI(fp->rx_sge_mapping);
2991                         context->ustorm_st_context.common.sge_page_base_lo =
2992                                                 U64_LO(fp->rx_sge_mapping);
2993
2994                         context->ustorm_st_context.common.max_sges_for_packet =
2995                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2996                         context->ustorm_st_context.common.max_sges_for_packet =
2997                                 ((context->ustorm_st_context.common.
2998                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
2999                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3000                 }
3001
3002                 context->ustorm_ag_context.cdu_usage =
3003                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3004                                                CDU_REGION_NUMBER_UCM_AG,
3005                                                ETH_CONNECTION_TYPE);
3006
3007                 context->xstorm_ag_context.cdu_reserved =
3008                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3009                                                CDU_REGION_NUMBER_XCM_AG,
3010                                                ETH_CONNECTION_TYPE);
3011         }
3012
3013         /* Tx */
3014         for_each_queue(bp, i) {
3015                 struct bnx2x_fastpath *fp = &bp->fp[i];
3016                 struct eth_context *context =
3017                         bnx2x_sp(bp, context[i].eth);
3018
3019                 context->cstorm_st_context.sb_index_number =
3020                                                 C_SB_ETH_TX_CQ_INDEX;
3021                 context->cstorm_st_context.status_block_id = fp->sb_id;
3022
3023                 context->xstorm_st_context.tx_bd_page_base_hi =
3024                                                 U64_HI(fp->tx_desc_mapping);
3025                 context->xstorm_st_context.tx_bd_page_base_lo =
3026                                                 U64_LO(fp->tx_desc_mapping);
3027                 context->xstorm_st_context.statistics_data = (fp->cl_id |
3028                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3029         }
3030 }
3031
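/* Populate the TSTORM RSS indirection table round-robin: entry i maps
 * to client (bp->fp->cl_id + i % num_queues), so with e.g. 4 queues the
 * table rotates over the first four client ids.
 */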
3032 static void bnx2x_init_ind_table(struct bnx2x *bp)
3033 {
3034         int func = BP_FUNC(bp);
3035         int i;
3036
3037         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3038                 return;
3039
3040         DP(NETIF_MSG_IFUP,
3041            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
3042         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3043                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3044                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3045                         bp->fp->cl_id + (i % bp->num_queues));
3046 }
3047
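/* Push the per-client Tstorm configuration (MTU, statistics counter id
 * and VLAN-removal flags) to every queue's client entry.
 */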
3048 void bnx2x_set_client_config(struct bnx2x *bp)
3049 {
3050         struct tstorm_eth_client_config tstorm_client = {0};
3051         int port = BP_PORT(bp);
3052         int i;
3053
3054         tstorm_client.mtu = bp->dev->mtu;
3055         tstorm_client.config_flags =
3056                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3057                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3058 #ifdef BCM_VLAN
3059         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3060                 tstorm_client.config_flags |=
3061                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3062                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3063         }
3064 #endif
3065
3066         for_each_queue(bp, i) {
3067                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3068
3069                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3070                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3071                        ((u32 *)&tstorm_client)[0]);
3072                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3073                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3074                        ((u32 *)&tstorm_client)[1]);
3075         }
3076
3077         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3078            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3079 }
3080
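/* Translate the driver rx_mode into Tstorm drop/accept-all filter flags
 * and a matching NIG LLH mask, write both to the chip, and refresh the
 * client configs for any mode other than NONE.
 */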
3081 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3082 {
3083         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3084         int mode = bp->rx_mode;
3085         int mask = bp->rx_mode_cl_mask;
3086         int func = BP_FUNC(bp);
3087         int port = BP_PORT(bp);
3088         int i;
3089         /* All but management unicast packets should pass to the host as well */
3090         u32 llh_mask =
3091                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3092                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3093                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3094                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3095
3096         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
3097
3098         switch (mode) {
3099         case BNX2X_RX_MODE_NONE: /* no Rx */
3100                 tstorm_mac_filter.ucast_drop_all = mask;
3101                 tstorm_mac_filter.mcast_drop_all = mask;
3102                 tstorm_mac_filter.bcast_drop_all = mask;
3103                 break;
3104
3105         case BNX2X_RX_MODE_NORMAL:
3106                 tstorm_mac_filter.bcast_accept_all = mask;
3107                 break;
3108
3109         case BNX2X_RX_MODE_ALLMULTI:
3110                 tstorm_mac_filter.mcast_accept_all = mask;
3111                 tstorm_mac_filter.bcast_accept_all = mask;
3112                 break;
3113
3114         case BNX2X_RX_MODE_PROMISC:
3115                 tstorm_mac_filter.ucast_accept_all = mask;
3116                 tstorm_mac_filter.mcast_accept_all = mask;
3117                 tstorm_mac_filter.bcast_accept_all = mask;
3118                 /* pass management unicast packets as well */
3119                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3120                 break;
3121
3122         default:
3123                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3124                 break;
3125         }
3126
3127         REG_WR(bp,
3128                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3129                llh_mask);
3130
3131         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3132                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3133                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3134                        ((u32 *)&tstorm_mac_filter)[i]);
3135
3136 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3137                    ((u32 *)&tstorm_mac_filter)[i]); */
3138         }
3139
3140         if (mode != BNX2X_RX_MODE_NONE)
3141                 bnx2x_set_client_config(bp);
3142 }
3143
3144 static void bnx2x_init_internal_common(struct bnx2x *bp)
3145 {
3146         int i;
3147
3148         /* Zero this manually as its initialization is
3149            currently missing in the initTool */
3150         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3151                 REG_WR(bp, BAR_USTRORM_INTMEM +
3152                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
3153 }
3154
3155 static void bnx2x_init_internal_port(struct bnx2x *bp)
3156 {
3157         int port = BP_PORT(bp);
3158
3159         REG_WR(bp,
3160                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3161         REG_WR(bp,
3162                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3163         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3164         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3165 }
3166
3167 static void bnx2x_init_internal_func(struct bnx2x *bp)
3168 {
3169         struct tstorm_eth_function_common_config tstorm_config = {0};
3170         struct stats_indication_flags stats_flags = {0};
3171         int port = BP_PORT(bp);
3172         int func = BP_FUNC(bp);
3173         int i, j;
3174         u32 offset;
3175         u16 max_agg_size;
3176
3177         tstorm_config.config_flags = RSS_FLAGS(bp);
3178
3179         if (is_multi(bp))
3180                 tstorm_config.rss_result_mask = MULTI_MASK;
3181
3182         /* Enable TPA if needed */
3183         if (bp->flags & TPA_ENABLE_FLAG)
3184                 tstorm_config.config_flags |=
3185                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3186
3187         if (IS_E1HMF(bp))
3188                 tstorm_config.config_flags |=
3189                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3190
3191         tstorm_config.leading_client_id = BP_L_ID(bp);
3192
3193         REG_WR(bp, BAR_TSTRORM_INTMEM +
3194                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3195                (*(u32 *)&tstorm_config));
3196
3197         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3198         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3199         bnx2x_set_storm_rx_mode(bp);
3200
3201         for_each_queue(bp, i) {
3202                 u8 cl_id = bp->fp[i].cl_id;
3203
3204                 /* reset xstorm per client statistics */
3205                 offset = BAR_XSTRORM_INTMEM +
3206                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3207                 for (j = 0;
3208                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3209                         REG_WR(bp, offset + j*4, 0);
3210
3211                 /* reset tstorm per client statistics */
3212                 offset = BAR_TSTRORM_INTMEM +
3213                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3214                 for (j = 0;
3215                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3216                         REG_WR(bp, offset + j*4, 0);
3217
3218                 /* reset ustorm per client statistics */
3219                 offset = BAR_USTRORM_INTMEM +
3220                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3221                 for (j = 0;
3222                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3223                         REG_WR(bp, offset + j*4, 0);
3224         }
3225
3226         /* Init statistics related context */
3227         stats_flags.collect_eth = 1;
3228
3229         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3230                ((u32 *)&stats_flags)[0]);
3231         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3232                ((u32 *)&stats_flags)[1]);
3233
3234         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3235                ((u32 *)&stats_flags)[0]);
3236         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3237                ((u32 *)&stats_flags)[1]);
3238
3239         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3240                ((u32 *)&stats_flags)[0]);
3241         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3242                ((u32 *)&stats_flags)[1]);
3243
3244         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3245                ((u32 *)&stats_flags)[0]);
3246         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3247                ((u32 *)&stats_flags)[1]);
3248
3249         REG_WR(bp, BAR_XSTRORM_INTMEM +
3250                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3251                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3252         REG_WR(bp, BAR_XSTRORM_INTMEM +
3253                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3254                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3255
3256         REG_WR(bp, BAR_TSTRORM_INTMEM +
3257                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3258                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3259         REG_WR(bp, BAR_TSTRORM_INTMEM +
3260                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3261                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3262
3263         REG_WR(bp, BAR_USTRORM_INTMEM +
3264                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3265                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3266         REG_WR(bp, BAR_USTRORM_INTMEM +
3267                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3268                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3269
3270         if (CHIP_IS_E1H(bp)) {
3271                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3272                         IS_E1HMF(bp));
3273                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3274                         IS_E1HMF(bp));
3275                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3276                         IS_E1HMF(bp));
3277                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3278                         IS_E1HMF(bp));
3279
3280                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3281                          bp->e1hov);
3282         }
3283
3284         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
3285         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3286                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
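        /* e.g. assuming 4K SGE pages and PAGES_PER_SGE == 2, this is
         * min(8 * 4096 * 2, 0xffff) = min(65536, 65535) = 65535
         */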
3287         for_each_queue(bp, i) {
3288                 struct bnx2x_fastpath *fp = &bp->fp[i];
3289
3290                 REG_WR(bp, BAR_USTRORM_INTMEM +
3291                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3292                        U64_LO(fp->rx_comp_mapping));
3293                 REG_WR(bp, BAR_USTRORM_INTMEM +
3294                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3295                        U64_HI(fp->rx_comp_mapping));
3296
3297                 /* Next page */
3298                 REG_WR(bp, BAR_USTRORM_INTMEM +
3299                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3300                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3301                 REG_WR(bp, BAR_USTRORM_INTMEM +
3302                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3303                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3304
3305                 REG_WR16(bp, BAR_USTRORM_INTMEM +
3306                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3307                          max_agg_size);
3308         }
3309
3310         /* dropless flow control */
3311         if (CHIP_IS_E1H(bp)) {
3312                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3313
3314                 rx_pause.bd_thr_low = 250;
3315                 rx_pause.cqe_thr_low = 250;
3316                 rx_pause.cos = 1;
3317                 rx_pause.sge_thr_low = 0;
3318                 rx_pause.bd_thr_high = 350;
3319                 rx_pause.cqe_thr_high = 350;
3320                 rx_pause.sge_thr_high = 0;
3321
3322                 for_each_queue(bp, i) {
3323                         struct bnx2x_fastpath *fp = &bp->fp[i];
3324
3325                         if (!fp->disable_tpa) {
3326                                 rx_pause.sge_thr_low = 150;
3327                                 rx_pause.sge_thr_high = 250;
3328                         }
3329
3331                         offset = BAR_USTRORM_INTMEM +
3332                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3333                                                                    fp->cl_id);
3334                         for (j = 0;
3335                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3336                              j++)
3337                                 REG_WR(bp, offset + j*4,
3338                                        ((u32 *)&rx_pause)[j]);
3339                 }
3340         }
3341
3342         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3343
3344         /* Init rate shaping and fairness contexts */
3345         if (IS_E1HMF(bp)) {
3346                 int vn;
3347
3348                 /* During init there is no active link
3349                    Until link is up, set link rate to 10Gbps */
3350                 bp->link_vars.line_speed = SPEED_10000;
3351                 bnx2x_init_port_minmax(bp);
3352
3353                 if (!BP_NOMCP(bp))
3354                         bp->mf_config =
3355                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3356                 bnx2x_calc_vn_weight_sum(bp);
3357
3358                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3359                         bnx2x_init_vn_minmax(bp, 2*vn + port);
3360
3361                 /* Enable rate shaping and fairness */
3362                 bp->cmng.flags.cmng_enables |=
3363                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3364
3365         } else {
3366                 /* rate shaping and fairness are disabled */
3367                 DP(NETIF_MSG_IFUP,
3368                    "single function mode  minmax will be disabled\n");
3369         }
3370
3372         /* Store cmng structures to internal memory */
3373         if (bp->port.pmf)
3374                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3375                         REG_WR(bp, BAR_XSTRORM_INTMEM +
3376                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3377                                ((u32 *)(&bp->cmng))[i]);
3378 }
3379
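/* The switch below falls through on purpose: a COMMON load also runs
 * the PORT and FUNCTION stages, and a PORT load also runs the FUNCTION
 * stage.
 */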
3380 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3381 {
3382         switch (load_code) {
3383         case FW_MSG_CODE_DRV_LOAD_COMMON:
3384                 bnx2x_init_internal_common(bp);
3385                 /* no break */
3386
3387         case FW_MSG_CODE_DRV_LOAD_PORT:
3388                 bnx2x_init_internal_port(bp);
3389                 /* no break */
3390
3391         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3392                 bnx2x_init_internal_func(bp);
3393                 break;
3394
3395         default:
3396                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3397                 break;
3398         }
3399 }
3400
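/* Top-level NIC init: bind each fastpath to its status block, then
 * bring up the default SB, coalescing, Rx/Tx/SP rings, contexts,
 * internal memory and the indirection table before enabling interrupts.
 */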
3401 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3402 {
3403         int i;
3404
3405         for_each_queue(bp, i) {
3406                 struct bnx2x_fastpath *fp = &bp->fp[i];
3407
3408                 fp->bp = bp;
3409                 fp->state = BNX2X_FP_STATE_CLOSED;
3410                 fp->index = i;
3411                 fp->cl_id = BP_L_ID(bp) + i;
3412 #ifdef BCM_CNIC
3413                 fp->sb_id = fp->cl_id + 1;
3414 #else
3415                 fp->sb_id = fp->cl_id;
3416 #endif
3417                 DP(NETIF_MSG_IFUP,
3418                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
3419                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3420                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3421                               fp->sb_id);
3422                 bnx2x_update_fpsb_idx(fp);
3423         }
3424
3425         /* ensure status block indices were read */
3426         rmb();
3427
3429         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3430                           DEF_SB_ID);
3431         bnx2x_update_dsb_idx(bp);
3432         bnx2x_update_coalesce(bp);
3433         bnx2x_init_rx_rings(bp);
3434         bnx2x_init_tx_ring(bp);
3435         bnx2x_init_sp_ring(bp);
3436         bnx2x_init_context(bp);
3437         bnx2x_init_internal(bp, load_code);
3438         bnx2x_init_ind_table(bp);
3439         bnx2x_stats_init(bp);
3440
3441         /* At this point, we are ready for interrupts */
3442         atomic_set(&bp->intr_sem, 0);
3443
3444         /* flush all before enabling interrupts */
3445         mb();
3446         mmiowb();
3447
3448         bnx2x_int_enable(bp);
3449
3450         /* Check for SPIO5 */
3451         bnx2x_attn_int_deasserted0(bp,
3452                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3453                                    AEU_INPUTS_ATTN_BITS_SPIO5);
3454 }
3455
3456 /* end of nic init */
3457
3458 /*
3459  * gzip service functions
3460  */
3461
3462 static int bnx2x_gunzip_init(struct bnx2x *bp)
3463 {
3464         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3465                                             &bp->gunzip_mapping, GFP_KERNEL);
3466         if (bp->gunzip_buf == NULL)
3467                 goto gunzip_nomem1;
3468
3469         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3470         if (bp->strm == NULL)
3471                 goto gunzip_nomem2;
3472
3473         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3474                                       GFP_KERNEL);
3475         if (bp->strm->workspace == NULL)
3476                 goto gunzip_nomem3;
3477
3478         return 0;
3479
3480 gunzip_nomem3:
3481         kfree(bp->strm);
3482         bp->strm = NULL;
3483
3484 gunzip_nomem2:
3485         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3486                           bp->gunzip_mapping);
3487         bp->gunzip_buf = NULL;
3488
3489 gunzip_nomem1:
3490         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3491                " decompression\n");
3492         return -ENOMEM;
3493 }
3494
3495 static void bnx2x_gunzip_end(struct bnx2x *bp)
3496 {
3497         kfree(bp->strm->workspace);
3498
3499         kfree(bp->strm);
3500         bp->strm = NULL;
3501
3502         if (bp->gunzip_buf) {
3503                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3504                                   bp->gunzip_mapping);
3505                 bp->gunzip_buf = NULL;
3506         }
3507 }
3508
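/* Decompress a gzip'ed firmware blob into bp->gunzip_buf.  The fixed
 * 10-byte gzip header (magic 0x1f 0x8b, method 8) may be followed by a
 * NUL-terminated file name when the FNAME flag is set; the raw deflate
 * stream after it is inflated with negative windowBits so that zlib
 * does not expect a zlib header.
 */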
3509 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3510 {
3511         int n, rc;
3512
3513         /* check gzip header */
3514         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3515                 BNX2X_ERR("Bad gzip header\n");
3516                 return -EINVAL;
3517         }
3518
3519         n = 10;
3520
3521 #define FNAME                           0x8
3522
3523         if (zbuf[3] & FNAME)
3524                 while ((zbuf[n++] != 0) && (n < len));
3525
3526         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3527         bp->strm->avail_in = len - n;
3528         bp->strm->next_out = bp->gunzip_buf;
3529         bp->strm->avail_out = FW_BUF_SIZE;
3530
3531         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3532         if (rc != Z_OK)
3533                 return rc;
3534
3535         rc = zlib_inflate(bp->strm, Z_FINISH);
3536         if ((rc != Z_OK) && (rc != Z_STREAM_END))
3537                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3538                            bp->strm->msg);
3539
3540         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3541         if (bp->gunzip_outlen & 0x3)
3542                 netdev_err(bp->dev, "Firmware decompression error:"
3543                                     " gunzip_outlen (%d) not aligned\n",
3544                                 bp->gunzip_outlen);
3545         bp->gunzip_outlen >>= 2;
3546
3547         zlib_inflateEnd(bp->strm);
3548
3549         if (rc == Z_STREAM_END)
3550                 return 0;
3551
3552         return rc;
3553 }
3554
3555 /* nic load/unload */
3556
3557 /*
3558  * General service functions
3559  */
3560
3561 /* send a NIG loopback debug packet */
3562 static void bnx2x_lb_pckt(struct bnx2x *bp)
3563 {
3564         u32 wb_write[3];
3565
3566         /* Ethernet source and destination addresses */
3567         wb_write[0] = 0x55555555;
3568         wb_write[1] = 0x55555555;
3569         wb_write[2] = 0x20;             /* SOP */
3570         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3571
3572         /* NON-IP protocol */
3573         wb_write[0] = 0x09000000;
3574         wb_write[1] = 0x55555555;
3575         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
3576         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3577 }
3578
3579 /* Some of the internal memories
3580  * are not directly readable from the driver;
3581  * to test them we send debug packets.
3582  */
3583 static int bnx2x_int_mem_test(struct bnx2x *bp)
3584 {
3585         int factor;
3586         int count, i;
3587         u32 val = 0;
3588
3589         if (CHIP_REV_IS_FPGA(bp))
3590                 factor = 120;
3591         else if (CHIP_REV_IS_EMUL(bp))
3592                 factor = 200;
3593         else
3594                 factor = 1;
3595
3596         DP(NETIF_MSG_HW, "start part1\n");
3597
3598         /* Disable inputs of parser neighbor blocks */
3599         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3600         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3601         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3602         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3603
3604         /*  Write 0 to parser credits for CFC search request */
3605         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3606
3607         /* send Ethernet packet */
3608         bnx2x_lb_pckt(bp);
3609
3610         /* TODO: do we need to reset the NIG statistics? */
3611         /* Wait until NIG register shows 1 packet of size 0x10 */
3612         count = 1000 * factor;
3613         while (count) {
3614
3615                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3616                 val = *bnx2x_sp(bp, wb_data[0]);
3617                 if (val == 0x10)
3618                         break;
3619
3620                 msleep(10);
3621                 count--;
3622         }
3623         if (val != 0x10) {
3624                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3625                 return -1;
3626         }
3627
3628         /* Wait until PRS register shows 1 packet */
3629         count = 1000 * factor;
3630         while (count) {
3631                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3632                 if (val == 1)
3633                         break;
3634
3635                 msleep(10);
3636                 count--;
3637         }
3638         if (val != 0x1) {
3639                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3640                 return -2;
3641         }
3642
3643         /* Reset and init BRB, PRS */
3644         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3645         msleep(50);
3646         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3647         msleep(50);
3648         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3649         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3650
3651         DP(NETIF_MSG_HW, "part2\n");
3652
3653         /* Disable inputs of parser neighbor blocks */
3654         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3655         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3656         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3657         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3658
3659         /* Write 0 to parser credits for CFC search request */
3660         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3661
3662         /* send 10 Ethernet packets */
3663         for (i = 0; i < 10; i++)
3664                 bnx2x_lb_pckt(bp);
3665
3666         /* Wait until NIG register shows 10 + 1
3667            packets of size 11*0x10 = 0xb0 */
3668         count = 1000 * factor;
3669         while (count) {
3670
3671                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3672                 val = *bnx2x_sp(bp, wb_data[0]);
3673                 if (val == 0xb0)
3674                         break;
3675
3676                 msleep(10);
3677                 count--;
3678         }
3679         if (val != 0xb0) {
3680                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3681                 return -3;
3682         }
3683
3684         /* Wait until PRS register shows 2 packets */
3685         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3686         if (val != 2)
3687                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3688
3689         /* Write 1 to parser credits for CFC search request */
3690         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3691
3692         /* Wait until PRS register shows 3 packets */
3693         msleep(10 * factor);
3694         /* the single credit should let one more packet through the parser */
3695         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3696         if (val != 3)
3697                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3698
3699         /* clear NIG EOP FIFO */
3700         for (i = 0; i < 11; i++)
3701                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3702         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3703         if (val != 1) {
3704                 BNX2X_ERR("clear of NIG failed\n");
3705                 return -4;
3706         }
3707
3708         /* Reset and init BRB, PRS, NIG */
3709         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3710         msleep(50);
3711         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3712         msleep(50);
3713         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3714         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3715 #ifndef BCM_CNIC
3716         /* set NIC mode */
3717         REG_WR(bp, PRS_REG_NIC_MODE, 1);
3718 #endif
3719
3720         /* Enable inputs of parser neighbor blocks */
3721         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3722         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3723         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3724         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3725
3726         DP(NETIF_MSG_HW, "done\n");
3727
3728         return 0; /* OK */
3729 }
3730
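/* Unmask (write 0 to) the attention interrupt masks of the HW blocks;
 * the SEM and MISC masks are deliberately left alone and PBF keeps
 * bits 3-4 masked.
 */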
3731 static void enable_blocks_attention(struct bnx2x *bp)
3732 {
3733         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3734         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3735         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3736         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3737         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3738         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3739         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3740         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3741         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3742 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3743 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3744         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3745         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3746         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3747 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3748 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3749         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3750         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3751         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3752         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3753 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3754 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3755         if (CHIP_REV_IS_FPGA(bp))
3756                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3757         else
3758                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3759         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3760         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3761         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3762 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3763 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3764         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3765         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3766 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3767         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
3768 }
3769
3770 static const struct {
3771         u32 addr;
3772         u32 mask;
3773 } bnx2x_parity_mask[] = {
3774         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3775         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3776         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3777         {HC_REG_HC_PRTY_MASK, 0xffffffff},
3778         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3779         {QM_REG_QM_PRTY_MASK, 0x0},
3780         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3781         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3782         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3783         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3784         {CDU_REG_CDU_PRTY_MASK, 0x0},
3785         {CFC_REG_CFC_PRTY_MASK, 0x0},
3786         {DBG_REG_DBG_PRTY_MASK, 0x0},
3787         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3788         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3789         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3790         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3791         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3792         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3793         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3794         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3795         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3796         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3797         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3798         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3799         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3800         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3801         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3802 };
3803
3804 static void enable_blocks_parity(struct bnx2x *bp)
3805 {
3806         int i;
3807
3808         /* ARRAY_SIZE() yields the number of entries in the mask table */
3809         for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
3810                 REG_WR(bp, bnx2x_parity_mask[i].addr,
3811                        bnx2x_parity_mask[i].mask);
3812 }
3813
3815 static void bnx2x_reset_common(struct bnx2x *bp)
3816 {
3817         /* reset_common */
3818         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3819                0xd3ffff7f);
3820         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3821 }
3822
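/* Derive the PXP arbiter orders from the PCIe device control register:
 * the max payload size field sets the write order and the max read
 * request size field sets the read order, unless bp->mrrs overrides
 * the latter.
 */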
3823 static void bnx2x_init_pxp(struct bnx2x *bp)
3824 {
3825         u16 devctl;
3826         int r_order, w_order;
3827
3828         pci_read_config_word(bp->pdev,
3829                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3830         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3831         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3832         if (bp->mrrs == -1)
3833                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3834         else {
3835                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3836                 r_order = bp->mrrs;
3837         }
3838
3839         bnx2x_init_pxp_arb(bp, r_order, w_order);
3840 }
3841
3842 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3843 {
3844         int is_required;
3845         u32 val;
3846         int port;
3847
3848         if (BP_NOMCP(bp))
3849                 return;
3850
3851         is_required = 0;
3852         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3853               SHARED_HW_CFG_FAN_FAILURE_MASK;
3854
3855         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3856                 is_required = 1;
3857
3858         /*
3859          * The fan failure mechanism is usually related to the PHY type since
3860          * the power consumption of the board is affected by the PHY. Currently,
3861          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3862          */
3863         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3864                 for (port = PORT_0; port < PORT_MAX; port++) {
3865                         u32 phy_type =
3866                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
3867                                          external_phy_config) &
3868                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869                         is_required |=
3870                                 ((phy_type ==
3871                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3872                                  (phy_type ==
3873                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3874                                  (phy_type ==
3875                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3876                 }
3877
3878         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3879
3880         if (is_required == 0)
3881                 return;
3882
3883         /* Fan failure is indicated by SPIO 5 */
3884         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3885                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
3886
3887         /* set to active low mode */
3888         val = REG_RD(bp, MISC_REG_SPIO_INT);
3889         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3890                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3891         REG_WR(bp, MISC_REG_SPIO_INT, val);
3892
3893         /* enable interrupt to signal the IGU */
3894         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3895         val |= (1 << MISC_REGISTERS_SPIO_5);
3896         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3897 }
3898
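/* Common (chip-wide) init, run only on the first load: reset the chip,
 * walk the COMMON_STAGE init of each HW block, self-test the internal
 * memories on E1, and finish with attention/parity enablement and PHY
 * init.
 */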
3899 static int bnx2x_init_common(struct bnx2x *bp)
3900 {
3901         u32 val, i;
3902 #ifdef BCM_CNIC
3903         u32 wb_write[2];
3904 #endif
3905
3906         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
3907
3908         bnx2x_reset_common(bp);
3909         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3910         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3911
3912         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3913         if (CHIP_IS_E1H(bp))
3914                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3915
3916         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3917         msleep(30);
3918         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3919
3920         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3921         if (CHIP_IS_E1(bp)) {
3922                 /* enable HW interrupt from PXP on USDM overflow
3923                    bit 16 on INT_MASK_0 */
3924                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3925         }
3926
3927         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3928         bnx2x_init_pxp(bp);
3929
3930 #ifdef __BIG_ENDIAN
3931         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3932         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3933         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3934         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3935         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3936         /* make sure this value is 0 */
3937         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3938
3939 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3940         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3941         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3942         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3943         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3944 #endif
3945
3946         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3947 #ifdef BCM_CNIC
3948         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3949         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3950         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3951 #endif
3952
3953         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3954                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3955
3956         /* let the HW do its magic ... */
3957         msleep(100);
3958         /* finish PXP init */
3959         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3960         if (val != 1) {
3961                 BNX2X_ERR("PXP2 CFG failed\n");
3962                 return -EBUSY;
3963         }
3964         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3965         if (val != 1) {
3966                 BNX2X_ERR("PXP2 RD_INIT failed\n");
3967                 return -EBUSY;
3968         }
3969
3970         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3971         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3972
3973         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3974
3975         /* clean the DMAE memory */
3976         bp->dmae_ready = 1;
3977         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3978
3979         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3980         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3981         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3982         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3983
3984         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3985         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3986         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3987         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3988
3989         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3990
3991 #ifdef BCM_CNIC
3992         wb_write[0] = 0;
3993         wb_write[1] = 0;
3994         for (i = 0; i < 64; i++) {
3995                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3996                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3997
3998                 if (CHIP_IS_E1H(bp)) {
3999                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4000                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4001                                           wb_write, 2);
4002                 }
4003         }
4004 #endif
4005         /* soft reset pulse */
4006         REG_WR(bp, QM_REG_SOFT_RESET, 1);
4007         REG_WR(bp, QM_REG_SOFT_RESET, 0);
4008
4009 #ifdef BCM_CNIC
4010         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4011 #endif
4012
4013         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4014         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4015         if (!CHIP_REV_IS_SLOW(bp)) {
4016                 /* enable hw interrupt from doorbell Q */
4017                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4018         }
4019
4020         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4021         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4022         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4023 #ifndef BCM_CNIC
4024         /* set NIC mode */
4025         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4026 #endif
4027         if (CHIP_IS_E1H(bp))
4028                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4029
4030         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4031         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4032         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4033         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4034
4035         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4036         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4037         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4038         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4039
4040         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4041         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4042         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4043         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4044
4045         /* sync semi rtc */
4046         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4047                0x80000000);
4048         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4049                0x80000000);
4050
4051         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4052         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4053         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4054
4055         REG_WR(bp, SRC_REG_SOFT_RST, 1);
4056         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4057                 REG_WR(bp, i, random32());
4058         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4059 #ifdef BCM_CNIC
4060         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4061         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4062         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4063         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4064         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4065         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4066         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4067         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4068         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4069         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4070 #endif
4071         REG_WR(bp, SRC_REG_SOFT_RST, 0);
4072
4073         if (sizeof(union cdu_context) != 1024)
4074                 /* we currently assume that a context is 1024 bytes */
4075                 dev_alert(&bp->pdev->dev, "please adjust the size "
4076                                           "of cdu_context(%ld)\n",
4077                          (long)sizeof(union cdu_context));
4078
4079         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4080         val = (4 << 24) + (0 << 12) + 1024;
4081         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4082
4083         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4084         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4085         /* enable context validation interrupt from CFC */
4086         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4087
4088         /* set the thresholds to prevent CFC/CDU race */
4089         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4090
4091         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4092         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4093
4094         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4095         /* Reset PCIE errors for debug */
4096         REG_WR(bp, 0x2814, 0xffffffff);
4097         REG_WR(bp, 0x3820, 0xffffffff);
4098
4099         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4100         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4101         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4102         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4103
4104         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4105         if (CHIP_IS_E1H(bp)) {
4106                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4107                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4108         }
4109
4110         if (CHIP_REV_IS_SLOW(bp))
4111                 msleep(200);
4112
4113         /* finish CFC init */
4114         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4115         if (val != 1) {
4116                 BNX2X_ERR("CFC LL_INIT failed\n");
4117                 return -EBUSY;
4118         }
4119         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4120         if (val != 1) {
4121                 BNX2X_ERR("CFC AC_INIT failed\n");
4122                 return -EBUSY;
4123         }
4124         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4125         if (val != 1) {
4126                 BNX2X_ERR("CFC CAM_INIT failed\n");
4127                 return -EBUSY;
4128         }
4129         REG_WR(bp, CFC_REG_DEBUG0, 0);
4130
4131         /* read NIG statistic
4132            to see if this is our first up since powerup */
4133         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4134         val = *bnx2x_sp(bp, wb_data[0]);
4135
4136         /* do internal memory self test */
4137         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4138                 BNX2X_ERR("internal mem self test failed\n");
4139                 return -EBUSY;
4140         }
4141
4142         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4143         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4144         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4145         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4146         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4147                 bp->port.need_hw_lock = 1;
4148                 break;
4149
4150         default:
4151                 break;
4152         }
4153
4154         bnx2x_setup_fan_failure_detection(bp);
4155
4156         /* clear PXP2 attentions */
4157         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4158
4159         enable_blocks_attention(bp);
4160         if (CHIP_PARITY_SUPPORTED(bp))
4161                 enable_blocks_parity(bp);
4162
4163         if (!BP_NOMCP(bp)) {
4164                 bnx2x_acquire_phy_lock(bp);
4165                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
4166                 bnx2x_release_phy_lock(bp);
4167         } else
4168                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4169
4170         return 0;
4171 }
4172
4173 static int bnx2x_init_port(struct bnx2x *bp)
4174 {
4175         int port = BP_PORT(bp);
4176         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4177         u32 low, high;
4178         u32 val;
4179
4180         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
4181
4182         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4183
4184         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4185         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4186
4187         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4188         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4189         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4190         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4191
4192 #ifdef BCM_CNIC
4193         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4194
4195         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4196         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4197         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4198 #endif
4199
4200         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4201
4202         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4203         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4204                 /* no pause for emulation and FPGA */
4205                 low = 0;
4206                 high = 513;
4207         } else {
4208                 if (IS_E1HMF(bp))
4209                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4210                 else if (bp->dev->mtu > 4096) {
4211                         if (bp->flags & ONE_PORT_FLAG)
4212                                 low = 160;
4213                         else {
4214                                 val = bp->dev->mtu;
4215                                 /* (24*1024 + val*4)/256 */
4216                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4217                         }
4218                 } else
4219                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4220                 high = low + 56;        /* 14*1024/256 */
4221         }
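             /* thresholds are apparently in units of 256-byte BRB blocks;
              * e.g. a two-port, non-MF setup with MTU 9000 gives
              * low = 96 + 141 = 237 blocks and high = 237 + 56 = 293 blocks
              * (14KB of headroom above low) */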
4222         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4223         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4224
4225
4226         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4227
4228         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4229         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4230         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4231         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4232
4233         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4234         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4235         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4236         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4237
4238         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4239         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4240
4241         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4242
4243         /* configure PBF to work without PAUSE, MTU 9000 */
4244         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4245
4246         /* update threshold */
4247         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4248         /* update init credit */
4249         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4250
4251         /* probe changes */
4252         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4253         msleep(5);
4254         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4255
4256 #ifdef BCM_CNIC
4257         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4258 #endif
4259         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4260         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4261
4262         if (CHIP_IS_E1(bp)) {
4263                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4264                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4265         }
4266         bnx2x_init_block(bp, HC_BLOCK, init_stage);
4267
4268         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4269         /* init aeu_mask_attn_func_0/1:
4270          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4271          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4272          *             bits 4-7 are used for "per vn group attention" */
4273         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4274                (IS_E1HMF(bp) ? 0xF7 : 0x7));
4275
4276         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4277         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4278         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4279         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4280         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4281
4282         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4283
4284         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4285
4286         if (CHIP_IS_E1H(bp)) {
4287                 /* 0x2 disable e1hov, 0x1 enable */
4288                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4289                        (IS_E1HMF(bp) ? 0x1 : 0x2));
4290
4292                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4293                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4294                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4296         }
4297
4298         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4299         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4300
4301         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4302         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4303                 {
4304                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4305
4306                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4307                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4308
4309                 /* The GPIO should be swapped if the swap register is
4310                    set and active */
4311                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4312                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4313
4314                 /* Select function upon port-swap configuration */
4315                 if (port == 0) {
4316                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4317                         aeu_gpio_mask = (swap_val && swap_override) ?
4318                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4319                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4320                 } else {
4321                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4322                         aeu_gpio_mask = (swap_val && swap_override) ?
4323                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4324                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4325                 }
4326                 val = REG_RD(bp, offset);
4327                 /* add GPIO3 to group */
4328                 val |= aeu_gpio_mask;
4329                 REG_WR(bp, offset, val);
4330                 }
4331                 bp->port.need_hw_lock = 1;
4332                 break;
4333
4334         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4335                 bp->port.need_hw_lock = 1;
                     /* fall through */
4336         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4337                 /* add SPIO 5 to group 0 */
4338                 {
4339                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4340                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4341                 val = REG_RD(bp, reg_addr);
4342                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4343                 REG_WR(bp, reg_addr, val);
4344                 }
4345                 break;
4346         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4347         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4348                 bp->port.need_hw_lock = 1;
4349                 break;
4350         default:
4351                 break;
4352         }
4353
4354         bnx2x__link_reset(bp);
4355
4356         return 0;
4357 }
4358
4359 #define ILT_PER_FUNC            (768/2)
4360 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
4361 /* the phys address is shifted right 12 bits and a 1 (valid bit) is
4362    added as the 53rd bit;
4363    then, since this is a wide register(TM),
4364    we split it into two 32-bit writes
4365  */
4366 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4367 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
4368 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
4369 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
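     /* e.g. a (hypothetical) DMA address 0x123456789000 splits into
      * ONCHIP_ADDR1() = 0x23456789 (address bits 12..43) and
      * ONCHIP_ADDR2() = 0x00100001 (valid bit 20 | address bits 44..63),
      * which bnx2x_ilt_wr() below writes as a single wide ILT entry
      */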
4370
4371 #ifdef BCM_CNIC
4372 #define CNIC_ILT_LINES          127
4373 #define CNIC_CTX_PER_ILT        16
4374 #else
4375 #define CNIC_ILT_LINES          0
4376 #endif
4377
4378 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4379 {
4380         int reg;
4381
4382         if (CHIP_IS_E1H(bp))
4383                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4384         else /* E1 */
4385                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4386
4387         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4388 }
4389
4390 static int bnx2x_init_func(struct bnx2x *bp)
4391 {
4392         int port = BP_PORT(bp);
4393         int func = BP_FUNC(bp);
4394         u32 addr, val;
4395         int i;
4396
4397         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4398
4399         /* set MSI reconfigure capability */
4400         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4401         val = REG_RD(bp, addr);
4402         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4403         REG_WR(bp, addr, val);
4404
4405         i = FUNC_ILT_BASE(func);
4406
4407         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4408         if (CHIP_IS_E1H(bp)) {
4409                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4410                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4411         } else /* E1 */
4412                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4413                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4414
4415 #ifdef BCM_CNIC
4416         i += 1 + CNIC_ILT_LINES;
4417         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4418         if (CHIP_IS_E1(bp))
4419                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4420         else {
4421                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4422                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4423         }
4424
4425         i++;
4426         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4427         if (CHIP_IS_E1(bp))
4428                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4429         else {
4430                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4431                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4432         }
4433
4434         i++;
4435         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4436         if (CHIP_IS_E1(bp))
4437                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4438         else {
4439                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4440                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4441         }
4442
4443         /* tell the searcher where the T2 table is */
4444         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4445
4446         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4447                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4448
4449         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4450                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4451                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4452
4453         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4454 #endif
4455
4456         if (CHIP_IS_E1H(bp)) {
4457                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4458                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4459                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4460                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4461                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4462                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4463                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4464                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4465                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4466
4467                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4468                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4469         }
4470
4471         /* HC init per function */
4472         if (CHIP_IS_E1H(bp)) {
4473                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4474
4475                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4476                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4477         }
4478         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4479
4480         /* Reset PCIE errors for debug */
4481         REG_WR(bp, 0x2114, 0xffffffff);
4482         REG_WR(bp, 0x2120, 0xffffffff);
4483
4484         return 0;
4485 }
4486
4487 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4488 {
4489         int i, rc = 0;
4490
4491         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
4492            BP_FUNC(bp), load_code);
4493
4494         bp->dmae_ready = 0;
4495         mutex_init(&bp->dmae_mutex);
4496         rc = bnx2x_gunzip_init(bp);
4497         if (rc)
4498                 return rc;
4499
4500         switch (load_code) {
4501         case FW_MSG_CODE_DRV_LOAD_COMMON:
4502                 rc = bnx2x_init_common(bp);
4503                 if (rc)
4504                         goto init_hw_err;
4505                 /* no break */
4506
4507         case FW_MSG_CODE_DRV_LOAD_PORT:
4508                 bp->dmae_ready = 1;
4509                 rc = bnx2x_init_port(bp);
4510                 if (rc)
4511                         goto init_hw_err;
4512                 /* no break */
4513
4514         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4515                 bp->dmae_ready = 1;
4516                 rc = bnx2x_init_func(bp);
4517                 if (rc)
4518                         goto init_hw_err;
4519                 break;
4520
4521         default:
4522                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4523                 break;
4524         }
4525
4526         if (!BP_NOMCP(bp)) {
4527                 int func = BP_FUNC(bp);
4528
4529                 bp->fw_drv_pulse_wr_seq =
4530                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4531                                  DRV_PULSE_SEQ_MASK);
4532                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4533         }
4534
4535         /* this needs to be done before gunzip end */
4536         bnx2x_zero_def_sb(bp);
4537         for_each_queue(bp, i)
4538                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4539 #ifdef BCM_CNIC
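             /* after the loop above 'i' equals the number of ethernet
              * queues, so this zeroes the CNIC status block that follows
              * them */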
4540         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4541 #endif
4542
4543 init_hw_err:
4544         bnx2x_gunzip_end(bp);
4545
4546         return rc;
4547 }
4548
4549 void bnx2x_free_mem(struct bnx2x *bp)
4550 {
4551
4552 #define BNX2X_PCI_FREE(x, y, size) \
4553         do { \
4554                 if (x) { \
4555                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
4556                         x = NULL; \
4557                         y = 0; \
4558                 } \
4559         } while (0)
4560
4561 #define BNX2X_FREE(x) \
4562         do { \
4563                 if (x) { \
4564                         vfree(x); \
4565                         x = NULL; \
4566                 } \
4567         } while (0)
4568
4569         int i;
4570
4571         /* fastpath */
4572         /* Common */
4573         for_each_queue(bp, i) {
4574
4575                 /* status blocks */
4576                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4577                                bnx2x_fp(bp, i, status_blk_mapping),
4578                                sizeof(struct host_status_block));
4579         }
4580         /* Rx */
4581         for_each_queue(bp, i) {
4582
4583                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4584                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4585                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4586                                bnx2x_fp(bp, i, rx_desc_mapping),
4587                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4588
4589                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4590                                bnx2x_fp(bp, i, rx_comp_mapping),
4591                                sizeof(struct eth_fast_path_rx_cqe) *
4592                                NUM_RCQ_BD);
4593
4594                 /* SGE ring */
4595                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4596                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4597                                bnx2x_fp(bp, i, rx_sge_mapping),
4598                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4599         }
4600         /* Tx */
4601         for_each_queue(bp, i) {
4602
4603                 /* fastpath tx rings: tx_buf tx_desc */
4604                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4605                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4606                                bnx2x_fp(bp, i, tx_desc_mapping),
4607                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4608         }
4609         /* end of fastpath */
4610
4611         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4612                        sizeof(struct host_def_status_block));
4613
4614         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4615                        sizeof(struct bnx2x_slowpath));
4616
4617 #ifdef BCM_CNIC
4618         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4619         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4620         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4621         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4622         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4623                        sizeof(struct host_status_block));
4624 #endif
4625         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4626
4627 #undef BNX2X_PCI_FREE
4628 #undef BNX2X_FREE
4629 }
4630
4631 int bnx2x_alloc_mem(struct bnx2x *bp)
4632 {
4633
4634 #define BNX2X_PCI_ALLOC(x, y, size) \
4635         do { \
4636                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4637                 if (x == NULL) \
4638                         goto alloc_mem_err; \
4639                 memset(x, 0, size); \
4640         } while (0)
4641
4642 #define BNX2X_ALLOC(x, size) \
4643         do { \
4644                 x = vmalloc(size); \
4645                 if (x == NULL) \
4646                         goto alloc_mem_err; \
4647                 memset(x, 0, size); \
4648         } while (0)
4649
4650         int i;
4651
4652         /* fastpath */
4653         /* Common */
4654         for_each_queue(bp, i) {
4655                 bnx2x_fp(bp, i, bp) = bp;
4656
4657                 /* status blocks */
4658                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4659                                 &bnx2x_fp(bp, i, status_blk_mapping),
4660                                 sizeof(struct host_status_block));
4661         }
4662         /* Rx */
4663         for_each_queue(bp, i) {
4664
4665                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4666                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4667                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4668                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4669                                 &bnx2x_fp(bp, i, rx_desc_mapping),
4670                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4671
4672                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4673                                 &bnx2x_fp(bp, i, rx_comp_mapping),
4674                                 sizeof(struct eth_fast_path_rx_cqe) *
4675                                 NUM_RCQ_BD);
4676
4677                 /* SGE ring */
4678                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4679                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4680                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4681                                 &bnx2x_fp(bp, i, rx_sge_mapping),
4682                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4683         }
4684         /* Tx */
4685         for_each_queue(bp, i) {
4686
4687                 /* fastpath tx rings: tx_buf tx_desc */
4688                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4689                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4690                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4691                                 &bnx2x_fp(bp, i, tx_desc_mapping),
4692                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4693         }
4694         /* end of fastpath */
4695
4696         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4697                         sizeof(struct host_def_status_block));
4698
4699         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4700                         sizeof(struct bnx2x_slowpath));
4701
4702 #ifdef BCM_CNIC
4703         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4704
4705         /* allocate searcher T2 table;
4706            we allocate 1/4 of alloc num for T2
4707            (which is not entered into the ILT) */
4708         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4709
4710         /* Initialize T2 (for 1024 connections) */
4711         for (i = 0; i < 16*1024; i += 64)
4712                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
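             /* each 64-byte T2 entry now ends with the physical address of
              * the next entry, forming the linked free list the searcher
              * walks; the last entry points just past the table */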
4713
4714         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4715         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4716
4717         /* QM queues (128*MAX_CONN) */
4718         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4719
4720         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4721                         sizeof(struct host_status_block));
4722 #endif
4723
4724         /* Slow path ring */
4725         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4726
4727         return 0;
4728
4729 alloc_mem_err:
4730         bnx2x_free_mem(bp);
4731         return -ENOMEM;
4732
4733 #undef BNX2X_PCI_ALLOC
4734 #undef BNX2X_ALLOC
4735 }
4736
4737
4738 /*
4739  * Init service functions
4740  */
4741
4742 /**
4743  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4744  *
4745  * @param bp driver descriptor
4746  * @param set set or clear an entry (1 or 0)
4747  * @param mac pointer to a buffer containing a MAC
4748  * @param cl_bit_vec bit vector of clients to register a MAC for
4749  * @param cam_offset offset in a CAM to use
4750  * @param with_bcast set broadcast MAC as well
4751  */
4752 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4753                                       u32 cl_bit_vec, u8 cam_offset,
4754                                       u8 with_bcast)
4755 {
4756         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4757         int port = BP_PORT(bp);
4758
4759         /* CAM allocation
4760          * unicasts 0-31:port0 32-63:port1
4761          * multicast 64-127:port0 128-191:port1
4762          */
4763         config->hdr.length = 1 + (with_bcast ? 1 : 0);
4764         config->hdr.offset = cam_offset;
4765         config->hdr.client_id = 0xff;
4766         config->hdr.reserved1 = 0;
4767
4768         /* primary MAC */
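             /* the swab16() calls apparently put each 16-bit half in the
              * order the CAM expects; e.g. on a little-endian host MAC
              * bytes 00:1b yield msb_mac_addr 0x001b */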
4769         config->config_table[0].cam_entry.msb_mac_addr =
4770                                         swab16(*(u16 *)&mac[0]);
4771         config->config_table[0].cam_entry.middle_mac_addr =
4772                                         swab16(*(u16 *)&mac[2]);
4773         config->config_table[0].cam_entry.lsb_mac_addr =
4774                                         swab16(*(u16 *)&mac[4]);
4775         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4776         if (set)
4777                 config->config_table[0].target_table_entry.flags = 0;
4778         else
4779                 CAM_INVALIDATE(config->config_table[0]);
4780         config->config_table[0].target_table_entry.clients_bit_vector =
4781                                                 cpu_to_le32(cl_bit_vec);
4782         config->config_table[0].target_table_entry.vlan_id = 0;
4783
4784         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4785            (set ? "setting" : "clearing"),
4786            config->config_table[0].cam_entry.msb_mac_addr,
4787            config->config_table[0].cam_entry.middle_mac_addr,
4788            config->config_table[0].cam_entry.lsb_mac_addr);
4789
4790         /* broadcast */
4791         if (with_bcast) {
4792                 config->config_table[1].cam_entry.msb_mac_addr =
4793                         cpu_to_le16(0xffff);
4794                 config->config_table[1].cam_entry.middle_mac_addr =
4795                         cpu_to_le16(0xffff);
4796                 config->config_table[1].cam_entry.lsb_mac_addr =
4797                         cpu_to_le16(0xffff);
4798                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4799                 if (set)
4800                         config->config_table[1].target_table_entry.flags =
4801                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4802                 else
4803                         CAM_INVALIDATE(config->config_table[1]);
4804                 config->config_table[1].target_table_entry.clients_bit_vector =
4805                                                         cpu_to_le32(cl_bit_vec);
4806                 config->config_table[1].target_table_entry.vlan_id = 0;
4807         }
4808
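             /* post the SET_MAC ramrod; the DMA address of the mac_config
              * buffer is handed to the chip as two 32-bit halves */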
4809         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4810                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4811                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4812 }
4813
4814 /**
4815  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4816  *
4817  * @param bp driver descriptor
4818  * @param set set or clear an entry (1 or 0)
4819  * @param mac pointer to a buffer containing a MAC
4820  * @param cl_bit_vec bit vector of clients to register a MAC for
4821  * @param cam_offset offset in a CAM to use
4822  */
4823 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4824                                        u32 cl_bit_vec, u8 cam_offset)
4825 {
4826         struct mac_configuration_cmd_e1h *config =
4827                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4828
4829         config->hdr.length = 1;
4830         config->hdr.offset = cam_offset;
4831         config->hdr.client_id = 0xff;
4832         config->hdr.reserved1 = 0;
4833
4834         /* primary MAC */
4835         config->config_table[0].msb_mac_addr =
4836                                         swab16(*(u16 *)&mac[0]);
4837         config->config_table[0].middle_mac_addr =
4838                                         swab16(*(u16 *)&mac[2]);
4839         config->config_table[0].lsb_mac_addr =
4840                                         swab16(*(u16 *)&mac[4]);
4841         config->config_table[0].clients_bit_vector =
4842                                         cpu_to_le32(cl_bit_vec);
4843         config->config_table[0].vlan_id = 0;
4844         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4845         if (set)
4846                 config->config_table[0].flags = BP_PORT(bp);
4847         else
4848                 config->config_table[0].flags =
4849                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4850
4851         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4852            (set ? "setting" : "clearing"),
4853            config->config_table[0].msb_mac_addr,
4854            config->config_table[0].middle_mac_addr,
4855            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4856
4857         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4858                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4859                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4860 }
4861
4862 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4863                              int *state_p, int poll)
4864 {
4865         /* can take a while if any port is running */
4866         int cnt = 5000;
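             /* 5000 iterations of msleep(1) bound the wait at roughly
              * 5 seconds (likely longer in practice, since msleep(1) may
              * oversleep) */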
4867
4868         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4869            poll ? "polling" : "waiting", state, idx);
4870
4871         might_sleep();
4872         while (cnt--) {
4873                 if (poll) {
4874                         bnx2x_rx_int(bp->fp, 10);
4875                         /* if index is different from 0
4876                          * the reply for some commands will
4877                          * be on the non-default queue
4878                          */
4879                         if (idx)
4880                                 bnx2x_rx_int(&bp->fp[idx], 10);
4881                 }
4882
4883                 mb(); /* state is changed by bnx2x_sp_event() */
4884                 if (*state_p == state) {
4885 #ifdef BNX2X_STOP_ON_ERROR
4886                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4887 #endif
4888                         return 0;
4889                 }
4890
4891                 msleep(1);
4892
4893                 if (bp->panic)
4894                         return -EIO;
4895         }
4896
4897         /* timeout! */
4898         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4899                   poll ? "polling" : "waiting", state, idx);
4900 #ifdef BNX2X_STOP_ON_ERROR
4901         bnx2x_panic();
4902 #endif
4903
4904         return -EBUSY;
4905 }
4906
4907 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4908 {
4909         bp->set_mac_pending++;
4910         smp_wmb();
4911
4912         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4913                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
4914
4915         /* Wait for a completion */
4916         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4917 }
4918
4919 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4920 {
4921         bp->set_mac_pending++;
4922         smp_wmb();
4923
4924         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4925                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4926                                   1);
4927
4928         /* Wait for a completion */
4929         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4930 }
4931
4932 #ifdef BCM_CNIC
4933 /**
4934  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4935  * MAC(s). This function will wait until the ramrod completion
4936  * returns.
4937  *
4938  * @param bp driver handle
4939  * @param set set or clear the CAM entry
4940  *
4941  * @return 0 if success, -ENODEV if the ramrod doesn't return.
4942  */
4943 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4944 {
4945         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4946
4947         bp->set_mac_pending++;
4948         smp_wmb();
4949
4950         /* Send a SET_MAC ramrod */
4951         if (CHIP_IS_E1(bp))
4952                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4953                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4954                                   1);
4955         else
4956                 /* CAM allocation for E1H
4957                  * unicasts: by func number
4958                  * multicast: 20+FUNC*20, 20 each
4959                  */
4960                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4961                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4962
4963         /* Wait for a completion when setting */
4964         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4965
4966         return 0;
4967 }
4968 #endif
4969
4970 int bnx2x_setup_leading(struct bnx2x *bp)
4971 {
4972         int rc;
4973
4974         /* reset IGU state */
4975         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4976
4977         /* SETUP ramrod */
4978         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4979
4980         /* Wait for completion */
4981         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4982
4983         return rc;
4984 }
4985
4986 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4987 {
4988         struct bnx2x_fastpath *fp = &bp->fp[index];
4989
4990         /* reset IGU state */
4991         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4992
4993         /* SETUP ramrod */
4994         fp->state = BNX2X_FP_STATE_OPENING;
4995         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4996                       fp->cl_id, 0);
4997
4998         /* Wait for completion */
4999         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
5000                                  &(fp->state), 0);
5001 }
5002
5003
5004 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
5005 {
5006
5007         switch (bp->multi_mode) {
5008         case ETH_RSS_MODE_DISABLED:
5009                 bp->num_queues = 1;
5010                 break;
5011
5012         case ETH_RSS_MODE_REGULAR:
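                     /* honour the num_queues module parameter if set,
                      * otherwise use one queue per online CPU; clamp to
                      * the HW limit either way */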
5013                 if (num_queues)
5014                         bp->num_queues = min_t(u32, num_queues,
5015                                                   BNX2X_MAX_QUEUES(bp));
5016                 else
5017                         bp->num_queues = min_t(u32, num_online_cpus(),
5018                                                   BNX2X_MAX_QUEUES(bp));
5019                 break;
5020
5021
5022         default:
5023                 bp->num_queues = 1;
5024                 break;
5025         }
5026 }
5027
5028
5029
5030 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5031 {
5032         struct bnx2x_fastpath *fp = &bp->fp[index];
5033         int rc;
5034
5035         /* halt the connection */
5036         fp->state = BNX2X_FP_STATE_HALTING;
5037         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5038
5039         /* Wait for completion */
5040         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5041                                &(fp->state), 1);
5042         if (rc) /* timeout */
5043                 return rc;
5044
5045         /* delete cfc entry */
5046         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5047
5048         /* Wait for completion */
5049         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5050                                &(fp->state), 1);
5051         return rc;
5052 }
5053
5054 static int bnx2x_stop_leading(struct bnx2x *bp)
5055 {
5056         __le16 dsb_sp_prod_idx;
5057         /* if the other port is handling traffic,
5058            this can take a lot of time */
5059         int cnt = 500;
5060         int rc;
5061
5062         might_sleep();
5063
5064         /* Send HALT ramrod */
5065         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5066         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5067
5068         /* Wait for completion */
5069         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5070                                &(bp->fp[0].state), 1);
5071         if (rc) /* timeout */
5072                 return rc;
5073
5074         dsb_sp_prod_idx = *bp->dsb_sp_prod;
5075
5076         /* Send PORT_DELETE ramrod */
5077         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5078
5079         /* Wait for the completion to arrive on the default status block;
5080            we are going to reset the chip anyway,
5081            so there is not much to do if this times out
5082          */
5083         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5084                 if (!cnt) {
5085                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5086                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5087                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
5088 #ifdef BNX2X_STOP_ON_ERROR
5089                         bnx2x_panic();
5090 #endif
5091                         rc = -EBUSY;
5092                         break;
5093                 }
5094                 cnt--;
5095                 msleep(1);
5096                 rmb(); /* Refresh the dsb_sp_prod */
5097         }
5098         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5099         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5100
5101         return rc;
5102 }
5103
5104 static void bnx2x_reset_func(struct bnx2x *bp)
5105 {
5106         int port = BP_PORT(bp);
5107         int func = BP_FUNC(bp);
5108         int base, i;
5109
5110         /* Configure IGU */
5111         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5112         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5113
5114 #ifdef BCM_CNIC
5115         /* Disable Timer scan */
5116         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5117         /*
5118          * Wait for at least 10 ms and up to 2 seconds for the timers scan to
5119          * complete
5120          */
5121         for (i = 0; i < 200; i++) {
5122                 msleep(10);
5123                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5124                         break;
5125         }
5126 #endif
5127         /* Clear ILT */
5128         base = FUNC_ILT_BASE(func);
5129         for (i = base; i < base + ILT_PER_FUNC; i++)
5130                 bnx2x_ilt_wr(bp, i, 0);
5131 }
5132
5133 static void bnx2x_reset_port(struct bnx2x *bp)
5134 {
5135         int port = BP_PORT(bp);
5136         u32 val;
5137
5138         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5139
5140         /* Do not rcv packets to BRB */
5141         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5142         /* Do not direct rcv packets that are not for MCP to the BRB */
5143         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5144                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5145
5146         /* Configure AEU */
5147         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5148
5149         msleep(100);
5150         /* Check for BRB port occupancy */
5151         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5152         if (val)
5153                 DP(NETIF_MSG_IFDOWN,
5154                    "BRB1 is not empty, %d blocks are occupied\n", val);
5155
5156         /* TODO: Close Doorbell port? */
5157 }
5158
5159 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5160 {
5161         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5162            BP_FUNC(bp), reset_code);
5163
5164         switch (reset_code) {
5165         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5166                 bnx2x_reset_port(bp);
5167                 bnx2x_reset_func(bp);
5168                 bnx2x_reset_common(bp);
5169                 break;
5170
5171         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5172                 bnx2x_reset_port(bp);
5173                 bnx2x_reset_func(bp);
5174                 break;
5175
5176         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5177                 bnx2x_reset_func(bp);
5178                 break;
5179
5180         default:
5181                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5182                 break;
5183         }
5184 }
5185
5186 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5187 {
5188         int port = BP_PORT(bp);
5189         u32 reset_code = 0;
5190         int i, cnt, rc;
5191
5192         /* Wait until tx fastpath tasks complete */
5193         for_each_queue(bp, i) {
5194                 struct bnx2x_fastpath *fp = &bp->fp[i];
5195
5196                 cnt = 1000;
5197                 while (bnx2x_has_tx_work_unload(fp)) {
5198
5199                         bnx2x_tx_int(fp);
5200                         if (!cnt) {
5201                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
5202                                           i);
5203 #ifdef BNX2X_STOP_ON_ERROR
5204                                 bnx2x_panic();
5205                                 return; /* void function */
5206 #else
5207                                 break;
5208 #endif
5209                         }
5210                         cnt--;
5211                         msleep(1);
5212                 }
5213         }
5214         /* Give HW time to discard old tx messages */
5215         msleep(1);
5216
5217         if (CHIP_IS_E1(bp)) {
5218                 struct mac_configuration_cmd *config =
5219                                                 bnx2x_sp(bp, mcast_config);
5220
5221                 bnx2x_set_eth_mac_addr_e1(bp, 0);
5222
5223                 for (i = 0; i < config->hdr.length; i++)
5224                         CAM_INVALIDATE(config->config_table[i]);
5225
5226                 config->hdr.length = i;
5227                 if (CHIP_REV_IS_SLOW(bp))
5228                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5229                 else
5230                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5231                 config->hdr.client_id = bp->fp->cl_id;
5232                 config->hdr.reserved1 = 0;
5233
5234                 bp->set_mac_pending++;
5235                 smp_wmb();
5236
5237                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5238                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5239                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5240
5241         } else { /* E1H */
5242                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5243
5244                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5245
5246                 for (i = 0; i < MC_HASH_SIZE; i++)
5247                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5248
5249                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5250         }
5251 #ifdef BCM_CNIC
5252         /* Clear iSCSI L2 MAC */
5253         mutex_lock(&bp->cnic_mutex);
5254         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5255                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5256                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5257         }
5258         mutex_unlock(&bp->cnic_mutex);
5259 #endif
5260
5261         if (unload_mode == UNLOAD_NORMAL)
5262                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5263
5264         else if (bp->flags & NO_WOL_FLAG)
5265                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5266
5267         else if (bp->wol) {
5268                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5269                 u8 *mac_addr = bp->dev->dev_addr;
5270                 u32 val;
5271                 /* The mac address is written to entries 1-4 to
5272                    preserve entry 0 which is used by the PMF */
5273                 u8 entry = (BP_E1HVN(bp) + 1)*8;
5274
5275                 val = (mac_addr[0] << 8) | mac_addr[1];
5276                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5277
5278                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5279                       (mac_addr[4] << 8) | mac_addr[5];
5280                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5281
5282                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5283
5284         } else
5285                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5286
5287         /* Close multi and leading connections
5288            Completions for ramrods are collected in a synchronous way */
5289         for_each_nondefault_queue(bp, i)
5290                 if (bnx2x_stop_multi(bp, i))
5291                         goto unload_error;
5292
5293         rc = bnx2x_stop_leading(bp);
5294         if (rc) {
5295                 BNX2X_ERR("Stop leading failed!\n");
5296 #ifdef BNX2X_STOP_ON_ERROR
5297                 return; /* void function */
5298 #else
5299                 goto unload_error;
5300 #endif
5301         }
5302
5303 unload_error:
5304         if (!BP_NOMCP(bp))
5305                 reset_code = bnx2x_fw_command(bp, reset_code);
5306         else {
5307                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5308                    load_count[0], load_count[1], load_count[2]);
5309                 load_count[0]--;
5310                 load_count[1 + port]--;
5311                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5312                    load_count[0], load_count[1], load_count[2]);
5313                 if (load_count[0] == 0)
5314                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5315                 else if (load_count[1 + port] == 0)
5316                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5317                 else
5318                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5319         }
5320
5321         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5322             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5323                 bnx2x__link_reset(bp);
5324
5325         /* Reset the chip */
5326         bnx2x_reset_chip(bp, reset_code);
5327
5328         /* Report UNLOAD_DONE to MCP */
5329         if (!BP_NOMCP(bp))
5330                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5331
5332 }
5333
5334 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5335 {
5336         u32 val;
5337
5338         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5339
5340         if (CHIP_IS_E1(bp)) {
5341                 int port = BP_PORT(bp);
5342                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5343                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
5344
5345                 val = REG_RD(bp, addr);
5346                 val &= ~(0x300);
5347                 REG_WR(bp, addr, val);
5348         } else if (CHIP_IS_E1H(bp)) {
5349                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5350                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5351                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5352                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5353         }
5354 }
5355
5356
5357 /* Close gates #2, #3 and #4: */
5358 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5359 {
5360         u32 val, addr;
5361
5362         /* Gates #2 and #4a are closed/opened for "not E1" only */
5363         if (!CHIP_IS_E1(bp)) {
5364                 /* #4 */
5365                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5366                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5367                        close ? (val | 0x1) : (val & (~(u32)1)));
5368                 /* #2 */
5369                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5370                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5371                        close ? (val | 0x1) : (val & (~(u32)1)));
5372         }
5373
5374         /* #3 */
5375         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5376         val = REG_RD(bp, addr);
5377         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5378
5379         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5380                 close ? "closing" : "opening");
5381         mmiowb();
5382 }
5383
5384 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
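     /* setting this bit in clp_mb is presumably what tells the MCP to
      * preserve the multi-function configuration across the coming reset */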
5385
5386 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5387 {
5388         /* Do some magic... */
5389         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5390         *magic_val = val & SHARED_MF_CLP_MAGIC;
5391         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5392 }
5393
5394 /* Restore the value of the `magic' bit.
5395  *
5396  * @param bp driver handle.
5397  * @param magic_val Old value of the `magic' bit.
5398  */
5399 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5400 {
5401         /* Restore the `magic' bit value... */
5405         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5406         MF_CFG_WR(bp, shared_mf_config.clp_mb,
5407                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5408 }
5409
5410 /* Prepares for MCP reset: takes care of CLP configurations.
5411  *
5412  * @param bp
5413  * @param magic_val Old value of 'magic' bit.
5414  */
5415 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5416 {
5417         u32 shmem;
5418         u32 validity_offset;
5419
5420         DP(NETIF_MSG_HW, "Starting\n");
5421
5422         /* Set `magic' bit in order to save MF config */
5423         if (!CHIP_IS_E1(bp))
5424                 bnx2x_clp_reset_prep(bp, magic_val);
5425
5426         /* Get shmem offset */
5427         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5428         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5429
5430         /* Clear validity map flags */
5431         if (shmem > 0)
5432                 REG_WR(bp, shmem + validity_offset, 0);
5433 }
5434
5435 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5436 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
5437
5438 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5439  * depending on the HW type.
5440  *
5441  * @param bp
5442  */
5443 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5444 {
5445         /* special handling for emulation and FPGA,
5446            wait 10 times longer */
5447         if (CHIP_REV_IS_SLOW(bp))
5448                 msleep(MCP_ONE_TIMEOUT*10);
5449         else
5450                 msleep(MCP_ONE_TIMEOUT);
5451 }
5452
5453 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5454 {
5455         u32 shmem, cnt, validity_offset, val;
5456         int rc = 0;
5457
5458         msleep(100);
5459
5460         /* Get shmem offset */
5461         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5462         if (shmem == 0) {
5463                 BNX2X_ERR("Shmem 0 return failure\n");
5464                 rc = -ENOTTY;
5465                 goto exit_lbl;
5466         }
5467
5468         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5469
5470         /* Wait for MCP to come up */
5471         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5472                 /* TBD: it's best to check the validity map of the
5473                  * last port; currently this checks port 0.
5474                  */
5475                 val = REG_RD(bp, shmem + validity_offset);
5476                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5477                    shmem + validity_offset, val);
5478
5479                 /* check that shared memory is valid. */
5480                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5481                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5482                         break;
5483
5484                 bnx2x_mcp_wait_one(bp);
5485         }
5486
5487         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5488
5489         /* Check that shared memory is valid. This indicates that MCP is up. */
5490         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5491             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5492                 BNX2X_ERR("Shmem signature not present. MCP is not up!!\n");
5493                 rc = -ENOTTY;
5494                 goto exit_lbl;
5495         }
5496
5497 exit_lbl:
5498         /* Restore the `magic' bit value */
5499         if (!CHIP_IS_E1(bp))
5500                 bnx2x_clp_reset_done(bp, magic_val);
5501
5502         return rc;
5503 }
5504
5505 static void bnx2x_pxp_prep(struct bnx2x *bp)
5506 {
5507         if (!CHIP_IS_E1(bp)) {
5508                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5509                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5510                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5511                 mmiowb();
5512         }
5513 }
5514
5515 /*
5516  * Reset the whole chip except for:
5517  *      - PCIE core
5518  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5519  *              one reset bit)
5520  *      - IGU
5521  *      - MISC (including AEU)
5522  *      - GRC
5523  *      - RBCN, RBCP
5524  */
5525 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5526 {
5527         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5528
5529         not_reset_mask1 =
5530                 MISC_REGISTERS_RESET_REG_1_RST_HC |
5531                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5532                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5533
5534         not_reset_mask2 =
5535                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5536                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5537                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5538                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5539                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5540                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5541                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5542                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5543
5544         reset_mask1 = 0xffffffff;
5545
5546         if (CHIP_IS_E1(bp))
5547                 reset_mask2 = 0xffff;
5548         else
5549                 reset_mask2 = 0x1ffff;
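             /* RESET_REG_2 apparently exposes 16 reset bits on E1 and one
              * more on E1H, hence the wider 0x1ffff mask */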
5550
5551         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5552                reset_mask1 & (~not_reset_mask1));
5553         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5554                reset_mask2 & (~not_reset_mask2));
5555
5556         barrier();
5557         mmiowb();
5558
5559         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5560         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5561         mmiowb();
5562 }
5563
5564 static int bnx2x_process_kill(struct bnx2x *bp)
5565 {
5566         int cnt = 1000;
5567         u32 val = 0;
5568         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5569
5570
5571         /* Empty the Tetris buffer, wait for 1s */
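             /* the idle criteria below (0x7e free SR entries, 0xa0 free
              * blocks, both ports idle, exp_rom2 all-ones) are assumed to
              * be the reset-default "fully drained" values */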
5572         do {
5573                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5574                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5575                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5576                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5577                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5578                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5579                     ((port_is_idle_0 & 0x1) == 0x1) &&
5580                     ((port_is_idle_1 & 0x1) == 0x1) &&
5581                     (pgl_exp_rom2 == 0xffffffff))
5582                         break;
5583                 msleep(1);
5584         } while (cnt-- > 0);
5585
5586         if (cnt <= 0) {
5587                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
5588                           " are still"
5589                           " outstanding read requests after 1s!\n");
5590                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5591                           " port_is_idle_0=0x%08x,"
5592                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5593                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5594                           pgl_exp_rom2);
5595                 return -EAGAIN;
5596         }
5597
5598         barrier();
5599
5600         /* Close gates #2, #3 and #4 */
5601         bnx2x_set_234_gates(bp, true);
5602
5603         /* TBD: Indicate that "process kill" is in progress to MCP */
5604
5605         /* Clear "unprepared" bit */
5606         REG_WR(bp, MISC_REG_UNPREPARED, 0);
5607         barrier();
5608
5609         /* Make sure all is written to the chip before the reset */
5610         mmiowb();
5611
5612         /* Wait for 1ms to empty GLUE and PCI-E core queues,
5613          * PSWHST, GRC and PSWRD Tetris buffer.
5614          */
5615         msleep(1);
5616
5617         /* Prepare for chip reset: */
5618         /* MCP */
5619         bnx2x_reset_mcp_prep(bp, &val);
5620
5621         /* PXP */
5622         bnx2x_pxp_prep(bp);
5623         barrier();
5624
5625         /* reset the chip */
5626         bnx2x_process_kill_chip_reset(bp);
5627         barrier();
5628
5629         /* Recover after reset: */
5630         /* MCP */
5631         if (bnx2x_reset_mcp_comp(bp, val))
5632                 return -EAGAIN;
5633
5634         /* PXP */
5635         bnx2x_pxp_prep(bp);
5636
5637         /* Open the gates #2, #3 and #4 */
5638         bnx2x_set_234_gates(bp, false);
5639
5640         /* TBD: IGU/AEU preparation: bring back the AEU/IGU to a
5641          * reset state, re-enable attentions. */
5642
5643         return 0;
5644 }
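
/* Editorial sketch: the Tetris-buffer wait above is an instance of the
 * poll-until-idle-with-timeout idiom.  A hypothetical generic helper
 * (reg_poll() is illustrative, not part of the driver) might look like:
 */
static int reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int timeout_ms)
{
        while (timeout_ms-- > 0) {
                if (REG_RD(bp, reg) == expected)
                        return 0;       /* condition met */
                msleep(1);
        }
        return -EAGAIN;                 /* caller decides how to recover */
}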
5645
5646 static int bnx2x_leader_reset(struct bnx2x *bp)
5647 {
5648         int rc = 0;
5649         /* Try to recover after the failure */
5650         if (bnx2x_process_kill(bp)) {
5651                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5652                        bp->dev->name);
5653                 rc = -EAGAIN;
5654                 goto exit_leader_reset;
5655         }
5656
5657         /* Clear "reset is in progress" bit and update the driver state */
5658         bnx2x_set_reset_done(bp);
5659         bp->recovery_state = BNX2X_RECOVERY_DONE;
5660
5661 exit_leader_reset:
5662         bp->is_leader = 0;
5663         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5664         smp_wmb();
5665         return rc;
5666 }
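
/* Editorial sketch: leadership here is a try-lock protocol over the shared
 * HW resource HW_LOCK_RESOURCE_RESERVED_08.  A minimal outline of the
 * lifetime, assuming the driver's own helpers (the function itself is
 * illustrative, only the flow is taken from the code above):
 */
static void leader_lock_example(struct bnx2x *bp)
{
        if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08))
                bp->is_leader = 1;      /* won the election */

        /* ... only the leader runs the global "process kill" ... */

        bp->is_leader = 0;
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
        smp_wmb();                      /* publish the state to other CPUs */
}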
5667
5668 /* Assumption: runs under rtnl lock. This together with the fact
5669  * that it's called only from bnx2x_reset_task() ensures that it
5670  * will never be called when netif_running(bp->dev) is false.
5671  */
5672 static void bnx2x_parity_recover(struct bnx2x *bp)
5673 {
5674         DP(NETIF_MSG_HW, "Handling parity\n");
5675         while (1) {
5676                 switch (bp->recovery_state) {
5677                 case BNX2X_RECOVERY_INIT:
5678                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5679                         /* Try to get a LEADER_LOCK HW lock */
5680                         if (bnx2x_trylock_hw_lock(bp,
5681                                 HW_LOCK_RESOURCE_RESERVED_08))
5682                                 bp->is_leader = 1;
5683
5684                         /* Stop the driver */
5685                         /* If interface has been removed - break */
5686                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5687                                 return;
5688
5689                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
5690                         /* Ensure "is_leader" and "recovery_state"
5691                          *  update values are seen on other CPUs
5692                          */
5693                         smp_wmb();
5694                         break;
5695
5696                 case BNX2X_RECOVERY_WAIT:
5697                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5698                         if (bp->is_leader) {
5699                                 u32 load_counter = bnx2x_get_load_cnt(bp);
5700                                 if (load_counter) {
5701                                         /* Wait until all other functions
5702                                          * have gone down.
5703                                          */
5704                                         schedule_delayed_work(&bp->reset_task,
5705                                                                 HZ/10);
5706                                         return;
5707                                 } else {
5708                                         /* If all other functions have gone
5709                                          * down, try to bring the chip back
5710                                          * to normal. Either way this is an
5711                                          * exit point for the leader.
5712                                          */
5713                                         if (bnx2x_leader_reset(bp) ||
5714                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
5715                                                 printk(KERN_ERR "%s: Recovery "
5716                                                 "has failed. A power cycle is "
5717                                                 "needed.\n", bp->dev->name);
5718                                                 /* Disconnect this device */
5719                                                 netif_device_detach(bp->dev);
5720                                                 /* Block ifup for all functions
5721                                                  * of this ASIC until
5722                                                  * "process kill" or power
5723                                                  * cycle.
5724                                                  */
5725                                                 bnx2x_set_reset_in_progress(bp);
5726                                                 /* Shut down the power */
5727                                                 bnx2x_set_power_state(bp,
5728                                                                 PCI_D3hot);
5729                                                 return;
5730                                         }
5731
5732                                         return;
5733                                 }
5734                         } else { /* non-leader */
5735                                 if (!bnx2x_reset_is_done(bp)) {
5736                                         /* Try to get a LEADER_LOCK HW lock as
5737                                          * long as a former leader may have
5738                                          * been unloaded by the user or
5739                                          * released leadership for some
5740                                          * other reason.
5741                                          */
5742                                         if (bnx2x_trylock_hw_lock(bp,
5743                                             HW_LOCK_RESOURCE_RESERVED_08)) {
5744                                                 /* I'm a leader now! Restart a
5745                                                  * switch case.
5746                                                  */
5747                                                 bp->is_leader = 1;
5748                                                 break;
5749                                         }
5750
5751                                         schedule_delayed_work(&bp->reset_task,
5752                                                                 HZ/10);
5753                                         return;
5754
5755                                 } else { /* A leader has completed
5756                                           * the "process kill". It's an exit
5757                                           * point for a non-leader.
5758                                           */
5759                                         bnx2x_nic_load(bp, LOAD_NORMAL);
5760                                         bp->recovery_state =
5761                                                 BNX2X_RECOVERY_DONE;
5762                                         smp_wmb();
5763                                         return;
5764                                 }
5765                         }
5766                 default:
5767                         return;
5768                 }
5769         }
5770 }
5771
5772 /* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
5773  * scheduled on the generic workqueue to prevent a deadlock.
5774  */
5775 static void bnx2x_reset_task(struct work_struct *work)
5776 {
5777         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5778
5779 #ifdef BNX2X_STOP_ON_ERROR
5780         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
5781                   " so reset not done to allow debug dump.\n"
5782                   " You will need to reboot when done.\n");
5783         return;
5784 #endif
5785
5786         rtnl_lock();
5787
5788         if (!netif_running(bp->dev))
5789                 goto reset_task_exit;
5790
5791         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5792                 bnx2x_parity_recover(bp);
5793         else {
5794                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5795                 bnx2x_nic_load(bp, LOAD_NORMAL);
5796         }
5797
5798 reset_task_exit:
5799         rtnl_unlock();
5800 }
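
/* Editorial sketch: the recovery state machine above re-arms itself by
 * re-scheduling the same delayed work item.  Minimal standalone usage of
 * the API, assuming a struct with an embedded delayed_work; my_dev and
 * my_task are hypothetical names:
 */
struct my_dev {
        struct delayed_work task;       /* set up with INIT_DELAYED_WORK() */
};

static void my_task(struct work_struct *work)
{
        struct my_dev *d = container_of(work, struct my_dev, task.work);

        /* ... do one step of the job, then re-arm if not finished ... */
        schedule_delayed_work(&d->task, HZ / 10);       /* again in ~100ms */
}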
5801
5802 /* end of nic load/unload */
5803
5804 /*
5805  * Init service functions
5806  */
5807
5808 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5809 {
5810         switch (func) {
5811         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5812         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5813         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5814         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5815         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5816         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5817         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5818         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5819         default:
5820                 BNX2X_ERR("Unsupported function index: %d\n", func);
5821                 return (u32)(-1);
5822         }
5823 }
5824
5825 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5826 {
5827         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5828
5829         /* Flush all outstanding writes */
5830         mmiowb();
5831
5832         /* Pretend to be function 0 */
5833         REG_WR(bp, reg, 0);
5834         /* Flush the GRC transaction (in the chip) */
5835         new_val = REG_RD(bp, reg);
5836         if (new_val != 0) {
5837                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5838                           new_val);
5839                 BUG();
5840         }
5841
5842         /* From now we are in the "like-E1" mode */
5843         bnx2x_int_disable(bp);
5844
5845         /* Flush all outstanding writes */
5846         mmiowb();
5847
5848         /* Restore the original function settings */
5849         REG_WR(bp, reg, orig_func);
5850         new_val = REG_RD(bp, reg);
5851         if (new_val != orig_func) {
5852                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5853                           orig_func, new_val);
5854                 BUG();
5855         }
5856 }
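
/* Editorial sketch: a posted GRC write is forced to complete by reading
 * the same register back -- the read cannot pass the write.  The pattern
 * used in bnx2x_undi_int_disable_e1h() above, reduced to its core
 * (grc_write_flush() is a hypothetical name):
 */
static void grc_write_flush(struct bnx2x *bp, u32 reg, u32 val)
{
        mmiowb();                       /* order prior MMIO writes */
        REG_WR(bp, reg, val);
        if (REG_RD(bp, reg) != val)     /* read back: flush and verify */
                BNX2X_ERR("GRC write to 0x%x did not stick\n", reg);
}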
5857
5858 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5859 {
5860         if (CHIP_IS_E1H(bp))
5861                 bnx2x_undi_int_disable_e1h(bp, func);
5862         else
5863                 bnx2x_int_disable(bp);
5864 }
5865
5866 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5867 {
5868         u32 val;
5869
5870         /* Check if there is any driver already loaded */
5871         val = REG_RD(bp, MISC_REG_UNPREPARED);
5872         if (val == 0x1) {
5873                 /* Check if it is the UNDI driver: the UNDI driver
5874                  * initializes the CID offset for the normal doorbell to 0x7
5875                  */
5876                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5877                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5878                 if (val == 0x7) {
5879                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5880                         /* save our func */
5881                         int func = BP_FUNC(bp);
5882                         u32 swap_en;
5883                         u32 swap_val;
5884
5885                         /* clear the UNDI indication */
5886                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5887
5888                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
5889
5890                         /* try to unload UNDI on port 0 */
5891                         bp->func = 0;
5892                         bp->fw_seq =
5893                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5894                                 DRV_MSG_SEQ_NUMBER_MASK);
5895                         reset_code = bnx2x_fw_command(bp, reset_code);
5896
5897                         /* if UNDI is loaded on the other port */
5898                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5899
5900                                 /* send "DONE" for previous unload */
5901                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5902
5903                                 /* unload UNDI on port 1 */
5904                                 bp->func = 1;
5905                                 bp->fw_seq =
5906                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5907                                         DRV_MSG_SEQ_NUMBER_MASK);
5908                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5909
5910                                 bnx2x_fw_command(bp, reset_code);
5911                         }
5912
5913                         /* now it's safe to release the lock */
5914                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5915
5916                         bnx2x_undi_int_disable(bp, func);
5917
5918                         /* close input traffic and wait for it */
5919                         /* Do not rcv packets to BRB */
5920                         REG_WR(bp,
5921                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5922                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5923                         /* Do not direct rcv packets that are not for MCP to
5924                          * the BRB */
5925                         REG_WR(bp,
5926                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5927                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5928                         /* clear AEU */
5929                         REG_WR(bp,
5930                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5931                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5932                         msleep(10);
5933
5934                         /* save NIG port swap info */
5935                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5936                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5937                         /* reset device */
5938                         REG_WR(bp,
5939                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5940                                0xd3ffffff);
5941                         REG_WR(bp,
5942                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5943                                0x1403);
5944                         /* take the NIG out of reset and restore swap values */
5945                         REG_WR(bp,
5946                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5947                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
5948                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5949                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5950
5951                         /* send unload done to the MCP */
5952                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5953
5954                         /* restore our func and fw_seq */
5955                         bp->func = func;
5956                         bp->fw_seq =
5957                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5958                                 DRV_MSG_SEQ_NUMBER_MASK);
5959
5960                 } else
5961                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5962         }
5963 }
5964
5965 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5966 {
5967         u32 val, val2, val3, val4, id;
5968         u16 pmc;
5969
5970         /* Get the chip revision id and number. */
5971         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5972         val = REG_RD(bp, MISC_REG_CHIP_NUM);
5973         id = ((val & 0xffff) << 16);
5974         val = REG_RD(bp, MISC_REG_CHIP_REV);
5975         id |= ((val & 0xf) << 12);
5976         val = REG_RD(bp, MISC_REG_CHIP_METAL);
5977         id |= ((val & 0xff) << 4);
5978         val = REG_RD(bp, MISC_REG_BOND_ID);
5979         id |= (val & 0xf);
5980         bp->common.chip_id = id;
5981         bp->link_params.chip_id = bp->common.chip_id;
5982         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5983
5984         val = (REG_RD(bp, 0x2874) & 0x55);
5985         if ((bp->common.chip_id & 0x1) ||
5986             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5987                 bp->flags |= ONE_PORT_FLAG;
5988                 BNX2X_DEV_INFO("single port device\n");
5989         }
5990
5991         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5992         bp->common.flash_size = (NVRAM_1MB_SIZE <<
5993                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
5994         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5995                        bp->common.flash_size, bp->common.flash_size);
5996
5997         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5998         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5999         bp->link_params.shmem_base = bp->common.shmem_base;
6000         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
6001                        bp->common.shmem_base, bp->common.shmem2_base);
6002
6003         if (!bp->common.shmem_base ||
6004             (bp->common.shmem_base < 0xA0000) ||
6005             (bp->common.shmem_base >= 0xC0000)) {
6006                 BNX2X_DEV_INFO("MCP not active\n");
6007                 bp->flags |= NO_MCP_FLAG;
6008                 return;
6009         }
6010
6011         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6012         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6013                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6014                 BNX2X_ERROR("BAD MCP validity signature\n");
6015
6016         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6017         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6018
6019         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6020                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6021                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6022
6023         bp->link_params.feature_config_flags = 0;
6024         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6025         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6026                 bp->link_params.feature_config_flags |=
6027                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6028         else
6029                 bp->link_params.feature_config_flags &=
6030                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6031
6032         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6033         bp->common.bc_ver = val;
6034         BNX2X_DEV_INFO("bc_ver %X\n", val);
6035         if (val < BNX2X_BC_VER) {
6036                 /* for now only warn;
6037                  * later we might need to enforce this */
6038                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6039                             "please upgrade BC\n", BNX2X_BC_VER, val);
6040         }
6041         bp->link_params.feature_config_flags |=
6042                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6043                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6044
6045         if (BP_E1HVN(bp) == 0) {
6046                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6047                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6048         } else {
6049                 /* no WOL capability for E1HVN != 0 */
6050                 bp->flags |= NO_WOL_FLAG;
6051         }
6052         BNX2X_DEV_INFO("%sWoL capable\n",
6053                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
6054
6055         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6056         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6057         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6058         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6059
6060         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6061                  val, val2, val3, val4);
6062 }
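
/* Editorial sketch: the chip_id word assembled above packs four fields.
 * Hypothetical decode macros (not from bnx2x.h) matching the layout in
 * the comment -- num:16-31, rev:12-15, metal:4-11, bond_id:0-3:
 */
#define EX_CHIP_NUM(id)         (((id) >> 16) & 0xffff)
#define EX_CHIP_REV(id)         (((id) >> 12) & 0xf)
#define EX_CHIP_METAL(id)       (((id) >> 4)  & 0xff)
#define EX_CHIP_BOND_ID(id)     ((id) & 0xf)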
6063
6064 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6065                                                     u32 switch_cfg)
6066 {
6067         int port = BP_PORT(bp);
6068         u32 ext_phy_type;
6069
6070         switch (switch_cfg) {
6071         case SWITCH_CFG_1G:
6072                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6073
6074                 ext_phy_type =
6075                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6076                 switch (ext_phy_type) {
6077                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6078                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6079                                        ext_phy_type);
6080
6081                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6082                                                SUPPORTED_10baseT_Full |
6083                                                SUPPORTED_100baseT_Half |
6084                                                SUPPORTED_100baseT_Full |
6085                                                SUPPORTED_1000baseT_Full |
6086                                                SUPPORTED_2500baseX_Full |
6087                                                SUPPORTED_TP |
6088                                                SUPPORTED_FIBRE |
6089                                                SUPPORTED_Autoneg |
6090                                                SUPPORTED_Pause |
6091                                                SUPPORTED_Asym_Pause);
6092                         break;
6093
6094                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6095                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6096                                        ext_phy_type);
6097
6098                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6099                                                SUPPORTED_10baseT_Full |
6100                                                SUPPORTED_100baseT_Half |
6101                                                SUPPORTED_100baseT_Full |
6102                                                SUPPORTED_1000baseT_Full |
6103                                                SUPPORTED_TP |
6104                                                SUPPORTED_FIBRE |
6105                                                SUPPORTED_Autoneg |
6106                                                SUPPORTED_Pause |
6107                                                SUPPORTED_Asym_Pause);
6108                         break;
6109
6110                 default:
6111                         BNX2X_ERR("NVRAM config error. "
6112                                   "BAD SerDes ext_phy_config 0x%x\n",
6113                                   bp->link_params.ext_phy_config);
6114                         return;
6115                 }
6116
6117                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6118                                            port*0x10);
6119                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6120                 break;
6121
6122         case SWITCH_CFG_10G:
6123                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6124
6125                 ext_phy_type =
6126                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6127                 switch (ext_phy_type) {
6128                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6129                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6130                                        ext_phy_type);
6131
6132                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6133                                                SUPPORTED_10baseT_Full |
6134                                                SUPPORTED_100baseT_Half |
6135                                                SUPPORTED_100baseT_Full |
6136                                                SUPPORTED_1000baseT_Full |
6137                                                SUPPORTED_2500baseX_Full |
6138                                                SUPPORTED_10000baseT_Full |
6139                                                SUPPORTED_TP |
6140                                                SUPPORTED_FIBRE |
6141                                                SUPPORTED_Autoneg |
6142                                                SUPPORTED_Pause |
6143                                                SUPPORTED_Asym_Pause);
6144                         break;
6145
6146                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6147                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6148                                        ext_phy_type);
6149
6150                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6151                                                SUPPORTED_1000baseT_Full |
6152                                                SUPPORTED_FIBRE |
6153                                                SUPPORTED_Autoneg |
6154                                                SUPPORTED_Pause |
6155                                                SUPPORTED_Asym_Pause);
6156                         break;
6157
6158                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6159                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6160                                        ext_phy_type);
6161
6162                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6163                                                SUPPORTED_2500baseX_Full |
6164                                                SUPPORTED_1000baseT_Full |
6165                                                SUPPORTED_FIBRE |
6166                                                SUPPORTED_Autoneg |
6167                                                SUPPORTED_Pause |
6168                                                SUPPORTED_Asym_Pause);
6169                         break;
6170
6171                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6172                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6173                                        ext_phy_type);
6174
6175                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6176                                                SUPPORTED_FIBRE |
6177                                                SUPPORTED_Pause |
6178                                                SUPPORTED_Asym_Pause);
6179                         break;
6180
6181                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6182                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6183                                        ext_phy_type);
6184
6185                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6186                                                SUPPORTED_1000baseT_Full |
6187                                                SUPPORTED_FIBRE |
6188                                                SUPPORTED_Pause |
6189                                                SUPPORTED_Asym_Pause);
6190                         break;
6191
6192                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6193                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6194                                        ext_phy_type);
6195
6196                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6197                                                SUPPORTED_1000baseT_Full |
6198                                                SUPPORTED_Autoneg |
6199                                                SUPPORTED_FIBRE |
6200                                                SUPPORTED_Pause |
6201                                                SUPPORTED_Asym_Pause);
6202                         break;
6203
6204                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6205                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6206                                        ext_phy_type);
6207
6208                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6209                                                SUPPORTED_1000baseT_Full |
6210                                                SUPPORTED_Autoneg |
6211                                                SUPPORTED_FIBRE |
6212                                                SUPPORTED_Pause |
6213                                                SUPPORTED_Asym_Pause);
6214                         break;
6215
6216                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6217                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6218                                        ext_phy_type);
6219
6220                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6221                                                SUPPORTED_TP |
6222                                                SUPPORTED_Autoneg |
6223                                                SUPPORTED_Pause |
6224                                                SUPPORTED_Asym_Pause);
6225                         break;
6226
6227                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6228                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6229                                        ext_phy_type);
6230
6231                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6232                                                SUPPORTED_10baseT_Full |
6233                                                SUPPORTED_100baseT_Half |
6234                                                SUPPORTED_100baseT_Full |
6235                                                SUPPORTED_1000baseT_Full |
6236                                                SUPPORTED_10000baseT_Full |
6237                                                SUPPORTED_TP |
6238                                                SUPPORTED_Autoneg |
6239                                                SUPPORTED_Pause |
6240                                                SUPPORTED_Asym_Pause);
6241                         break;
6242
6243                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6244                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6245                                   bp->link_params.ext_phy_config);
6246                         break;
6247
6248                 default:
6249                         BNX2X_ERR("NVRAM config error. "
6250                                   "BAD XGXS ext_phy_config 0x%x\n",
6251                                   bp->link_params.ext_phy_config);
6252                         return;
6253                 }
6254
6255                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6256                                            port*0x18);
6257                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6258
6259                 break;
6260
6261         default:
6262                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6263                           bp->port.link_config);
6264                 return;
6265         }
6266         bp->link_params.phy_addr = bp->port.phy_addr;
6267
6268         /* mask what we support according to speed_cap_mask */
6269         if (!(bp->link_params.speed_cap_mask &
6270                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6271                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
6272
6273         if (!(bp->link_params.speed_cap_mask &
6274                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6275                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
6276
6277         if (!(bp->link_params.speed_cap_mask &
6278                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6279                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
6280
6281         if (!(bp->link_params.speed_cap_mask &
6282                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6283                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
6284
6285         if (!(bp->link_params.speed_cap_mask &
6286                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6287                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6288                                         SUPPORTED_1000baseT_Full);
6289
6290         if (!(bp->link_params.speed_cap_mask &
6291                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6292                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6293
6294         if (!(bp->link_params.speed_cap_mask &
6295                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6296                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6297
6298         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
6299 }
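
/* Editorial sketch: the chain of speed_cap_mask checks above could be
 * written table-driven.  A hypothetical equivalent (cap_map[] and
 * filter_supported() are illustrative, not driver code):
 */
static const struct {
        u32 cap_bit;            /* PORT_HW_CFG_SPEED_CAPABILITY_D0_* */
        u32 supported;          /* SUPPORTED_* flags to drop when absent */
} cap_map[] = {
        { PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF, SUPPORTED_10baseT_Half },
        { PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL, SUPPORTED_10baseT_Full },
        /* ... one entry per speed, as in the if-chain above ... */
};

static void filter_supported(struct bnx2x *bp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cap_map); i++)
                if (!(bp->link_params.speed_cap_mask & cap_map[i].cap_bit))
                        bp->port.supported &= ~cap_map[i].supported;
}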
6300
6301 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6302 {
6303         bp->link_params.req_duplex = DUPLEX_FULL;
6304
6305         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6306         case PORT_FEATURE_LINK_SPEED_AUTO:
6307                 if (bp->port.supported & SUPPORTED_Autoneg) {
6308                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6309                         bp->port.advertising = bp->port.supported;
6310                 } else {
6311                         u32 ext_phy_type =
6312                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6313
6314                         if ((ext_phy_type ==
6315                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6316                             (ext_phy_type ==
6317                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6318                                 /* force 10G, no AN */
6319                                 bp->link_params.req_line_speed = SPEED_10000;
6320                                 bp->port.advertising =
6321                                                 (ADVERTISED_10000baseT_Full |
6322                                                  ADVERTISED_FIBRE);
6323                                 break;
6324                         }
6325                         BNX2X_ERR("NVRAM config error. "
6326                                   "Invalid link_config 0x%x"
6327                                   "  Autoneg not supported\n",
6328                                   bp->port.link_config);
6329                         return;
6330                 }
6331                 break;
6332
6333         case PORT_FEATURE_LINK_SPEED_10M_FULL:
6334                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
6335                         bp->link_params.req_line_speed = SPEED_10;
6336                         bp->port.advertising = (ADVERTISED_10baseT_Full |
6337                                                 ADVERTISED_TP);
6338                 } else {
6339                         BNX2X_ERROR("NVRAM config error. "
6340                                     "Invalid link_config 0x%x"
6341                                     "  speed_cap_mask 0x%x\n",
6342                                     bp->port.link_config,
6343                                     bp->link_params.speed_cap_mask);
6344                         return;
6345                 }
6346                 break;
6347
6348         case PORT_FEATURE_LINK_SPEED_10M_HALF:
6349                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
6350                         bp->link_params.req_line_speed = SPEED_10;
6351                         bp->link_params.req_duplex = DUPLEX_HALF;
6352                         bp->port.advertising = (ADVERTISED_10baseT_Half |
6353                                                 ADVERTISED_TP);
6354                 } else {
6355                         BNX2X_ERROR("NVRAM config error. "
6356                                     "Invalid link_config 0x%x"
6357                                     "  speed_cap_mask 0x%x\n",
6358                                     bp->port.link_config,
6359                                     bp->link_params.speed_cap_mask);
6360                         return;
6361                 }
6362                 break;
6363
6364         case PORT_FEATURE_LINK_SPEED_100M_FULL:
6365                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
6366                         bp->link_params.req_line_speed = SPEED_100;
6367                         bp->port.advertising = (ADVERTISED_100baseT_Full |
6368                                                 ADVERTISED_TP);
6369                 } else {
6370                         BNX2X_ERROR("NVRAM config error. "
6371                                     "Invalid link_config 0x%x"
6372                                     "  speed_cap_mask 0x%x\n",
6373                                     bp->port.link_config,
6374                                     bp->link_params.speed_cap_mask);
6375                         return;
6376                 }
6377                 break;
6378
6379         case PORT_FEATURE_LINK_SPEED_100M_HALF:
6380                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
6381                         bp->link_params.req_line_speed = SPEED_100;
6382                         bp->link_params.req_duplex = DUPLEX_HALF;
6383                         bp->port.advertising = (ADVERTISED_100baseT_Half |
6384                                                 ADVERTISED_TP);
6385                 } else {
6386                         BNX2X_ERROR("NVRAM config error. "
6387                                     "Invalid link_config 0x%x"
6388                                     "  speed_cap_mask 0x%x\n",
6389                                     bp->port.link_config,
6390                                     bp->link_params.speed_cap_mask);
6391                         return;
6392                 }
6393                 break;
6394
6395         case PORT_FEATURE_LINK_SPEED_1G:
6396                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6397                         bp->link_params.req_line_speed = SPEED_1000;
6398                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
6399                                                 ADVERTISED_TP);
6400                 } else {
6401                         BNX2X_ERROR("NVRAM config error. "
6402                                     "Invalid link_config 0x%x"
6403                                     "  speed_cap_mask 0x%x\n",
6404                                     bp->port.link_config,
6405                                     bp->link_params.speed_cap_mask);
6406                         return;
6407                 }
6408                 break;
6409
6410         case PORT_FEATURE_LINK_SPEED_2_5G:
6411                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6412                         bp->link_params.req_line_speed = SPEED_2500;
6413                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
6414                                                 ADVERTISED_TP);
6415                 } else {
6416                         BNX2X_ERROR("NVRAM config error. "
6417                                     "Invalid link_config 0x%x"
6418                                     "  speed_cap_mask 0x%x\n",
6419                                     bp->port.link_config,
6420                                     bp->link_params.speed_cap_mask);
6421                         return;
6422                 }
6423                 break;
6424
6425         case PORT_FEATURE_LINK_SPEED_10G_CX4:
6426         case PORT_FEATURE_LINK_SPEED_10G_KX4:
6427         case PORT_FEATURE_LINK_SPEED_10G_KR:
6428                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6429                         bp->link_params.req_line_speed = SPEED_10000;
6430                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
6431                                                 ADVERTISED_FIBRE);
6432                 } else {
6433                         BNX2X_ERROR("NVRAM config error. "
6434                                     "Invalid link_config 0x%x"
6435                                     "  speed_cap_mask 0x%x\n",
6436                                     bp->port.link_config,
6437                                     bp->link_params.speed_cap_mask);
6438                         return;
6439                 }
6440                 break;
6441
6442         default:
6443                 BNX2X_ERROR("NVRAM config error. "
6444                             "BAD link speed link_config 0x%x\n",
6445                             bp->port.link_config);
6446                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6447                 bp->port.advertising = bp->port.supported;
6448                 break;
6449         }
6450
6451         bp->link_params.req_flow_ctrl = (bp->port.link_config &
6452                                          PORT_FEATURE_FLOW_CONTROL_MASK);
6453         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6454             !(bp->port.supported & SUPPORTED_Autoneg))
6455                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6456
6457         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
6458                        "  advertising 0x%x\n",
6459                        bp->link_params.req_line_speed,
6460                        bp->link_params.req_duplex,
6461                        bp->link_params.req_flow_ctrl, bp->port.advertising);
6462 }
6463
6464 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6465 {
6466         mac_hi = cpu_to_be16(mac_hi);
6467         mac_lo = cpu_to_be32(mac_lo);
6468         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6469         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6470 }
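
/* Editorial sketch: bnx2x_set_mac_buf() lays the address out big-endian,
 * two bytes from mac_hi followed by four from mac_lo.  Worked example
 * with hypothetical values:
 */
static void mac_buf_example(void)
{
        u8 mac[ETH_ALEN];

        bnx2x_set_mac_buf(mac, 0x18a2b3c4, 0x0010);
        /* mac[] now holds { 0x00, 0x10, 0x18, 0xa2, 0xb3, 0xc4 },
         * i.e. 00:10:18:a2:b3:c4 */
}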
6471
6472 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6473 {
6474         int port = BP_PORT(bp);
6475         u32 val, val2;
6476         u32 config;
6477         u16 i;
6478         u32 ext_phy_type;
6479
6480         bp->link_params.bp = bp;
6481         bp->link_params.port = port;
6482
6483         bp->link_params.lane_config =
6484                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6485         bp->link_params.ext_phy_config =
6486                 SHMEM_RD(bp,
6487                          dev_info.port_hw_config[port].external_phy_config);
6488         /* BCM8727_NOC => BCM8727 no over current */
6489         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6490             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6491                 bp->link_params.ext_phy_config &=
6492                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6493                 bp->link_params.ext_phy_config |=
6494                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6495                 bp->link_params.feature_config_flags |=
6496                         FEATURE_CONFIG_BCM8727_NOC;
6497         }
6498
6499         bp->link_params.speed_cap_mask =
6500                 SHMEM_RD(bp,
6501                          dev_info.port_hw_config[port].speed_capability_mask);
6502
6503         bp->port.link_config =
6504                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6505
6506         /* Get the XGXS RX and TX config for all 4 lanes */
6507         for (i = 0; i < 2; i++) {
6508                 val = SHMEM_RD(bp,
6509                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6510                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6511                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6512
6513                 val = SHMEM_RD(bp,
6514                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6515                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6516                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6517         }
6518
6519         /* If the device is capable of WoL, set the default state according
6520          * to the HW
6521          */
6522         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6523         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6524                    (config & PORT_FEATURE_WOL_ENABLED));
6525
6526         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
6527                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
6528                        bp->link_params.lane_config,
6529                        bp->link_params.ext_phy_config,
6530                        bp->link_params.speed_cap_mask, bp->port.link_config);
6531
6532         bp->link_params.switch_cfg |= (bp->port.link_config &
6533                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
6534         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6535
6536         bnx2x_link_settings_requested(bp);
6537
6538         /*
6539          * If connected directly, work with the internal PHY, otherwise, work
6540          * with the external PHY
6541          */
6542         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6543         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6544                 bp->mdio.prtad = bp->link_params.phy_addr;
6545
6546         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6547                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6548                 bp->mdio.prtad =
6549                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
6550
6551         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6552         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6553         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6554         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6555         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6556
6557 #ifdef BCM_CNIC
6558         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6559         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6560         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6561 #endif
6562 }
6563
6564 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6565 {
6566         int func = BP_FUNC(bp);
6567         u32 val, val2;
6568         int rc = 0;
6569
6570         bnx2x_get_common_hwinfo(bp);
6571
6572         bp->e1hov = 0;
6573         bp->e1hmf = 0;
6574         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6575                 bp->mf_config =
6576                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6577
6578                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6579                        FUNC_MF_CFG_E1HOV_TAG_MASK);
6580                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6581                         bp->e1hmf = 1;
6582                 BNX2X_DEV_INFO("%s function mode\n",
6583                                IS_E1HMF(bp) ? "multi" : "single");
6584
6585                 if (IS_E1HMF(bp)) {
6586                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6587                                                                 e1hov_tag) &
6588                                FUNC_MF_CFG_E1HOV_TAG_MASK);
6589                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6590                                 bp->e1hov = val;
6591                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6592                                                "(0x%04x)\n",
6593                                                func, bp->e1hov, bp->e1hov);
6594                         } else {
6595                                 BNX2X_ERROR("No valid E1HOV for func %d,"
6596                                             "  aborting\n", func);
6597                                 rc = -EPERM;
6598                         }
6599                 } else {
6600                         if (BP_E1HVN(bp)) {
6601                                 BNX2X_ERROR("VN %d in single function mode,"
6602                                             "  aborting\n", BP_E1HVN(bp));
6603                                 rc = -EPERM;
6604                         }
6605                 }
6606         }
6607
6608         if (!BP_NOMCP(bp)) {
6609                 bnx2x_get_port_hwinfo(bp);
6610
6611                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6612                               DRV_MSG_SEQ_NUMBER_MASK);
6613                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6614         }
6615
6616         if (IS_E1HMF(bp)) {
6617                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6618                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
6619                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6620                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6621                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6622                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6623                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6624                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6625                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
6626                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
6627                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6628                                ETH_ALEN);
6629                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6630                                ETH_ALEN);
6631                 }
6632
6633                 return rc;
6634         }
6635
6636         if (BP_NOMCP(bp)) {
6637                 /* only supposed to happen on emulation/FPGA */
6638                 BNX2X_ERROR("warning: random MAC workaround active\n");
6639                 random_ether_addr(bp->dev->dev_addr);
6640                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6641         }
6642
6643         return rc;
6644 }
6645
6646 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6647 {
6648         int cnt, i, block_end, rodi;
6649         char vpd_data[BNX2X_VPD_LEN+1];
6650         char str_id_reg[VENDOR_ID_LEN+1];
6651         char str_id_cap[VENDOR_ID_LEN+1];
6652         u8 len;
6653
6654         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6655         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6656
6657         if (cnt < BNX2X_VPD_LEN)
6658                 goto out_not_found;
6659
6660         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6661                              PCI_VPD_LRDT_RO_DATA);
6662         if (i < 0)
6663                 goto out_not_found;
6664
6665
6666         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6667                     pci_vpd_lrdt_size(&vpd_data[i]);
6668
6669         i += PCI_VPD_LRDT_TAG_SIZE;
6670
6671         if (block_end > BNX2X_VPD_LEN)
6672                 goto out_not_found;
6673
6674         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6675                                    PCI_VPD_RO_KEYWORD_MFR_ID);
6676         if (rodi < 0)
6677                 goto out_not_found;
6678
6679         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6680
6681         if (len != VENDOR_ID_LEN)
6682                 goto out_not_found;
6683
6684         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6685
6686         /* vendor specific info */
6687         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6688         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6689         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6690             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6691
6692                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6693                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
6694                 if (rodi >= 0) {
6695                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6696
6697                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6698
6699                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6700                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6701                                 bp->fw_ver[len] = ' ';
6702                         }
6703                 }
6704                 return;
6705         }
6706 out_not_found:
6707         return;
6708 }
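
/* Editorial sketch: the VPD lookup above follows the generic pattern of
 * the PCI VPD helpers -- find the read-only LRDT block, then a keyword
 * inside it.  The skeleton, with error handling elided
 * (vpd_keyword_example() is a hypothetical name):
 */
static void vpd_keyword_example(struct pci_dev *pdev)
{
        char buf[BNX2X_VPD_LEN + 1];
        int i, rodi, block_end;

        if (pci_read_vpd(pdev, 0, BNX2X_VPD_LEN, buf) < BNX2X_VPD_LEN)
                return;

        i = pci_vpd_find_tag(buf, 0, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                return;
        block_end = i + PCI_VPD_LRDT_TAG_SIZE + pci_vpd_lrdt_size(&buf[i]);
        i += PCI_VPD_LRDT_TAG_SIZE;

        rodi = pci_vpd_find_info_keyword(buf, i, block_end,
                                         PCI_VPD_RO_KEYWORD_MFR_ID);
        /* rodi indexes the keyword header; the data starts
         * PCI_VPD_INFO_FLD_HDR_SIZE bytes later and is
         * pci_vpd_info_field_size(&buf[rodi]) bytes long */
}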
6709
6710 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6711 {
6712         int func = BP_FUNC(bp);
6713         int timer_interval;
6714         int rc;
6715
6716         /* Disable interrupt handling until HW is initialized */
6717         atomic_set(&bp->intr_sem, 1);
6718         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6719
6720         mutex_init(&bp->port.phy_mutex);
6721         mutex_init(&bp->fw_mb_mutex);
6722         spin_lock_init(&bp->stats_lock);
6723 #ifdef BCM_CNIC
6724         mutex_init(&bp->cnic_mutex);
6725 #endif
6726
6727         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6728         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6729
6730         rc = bnx2x_get_hwinfo(bp);
6731
6732         bnx2x_read_fwinfo(bp);
6733         /* need to reset the chip if UNDI was active */
6734         if (!BP_NOMCP(bp))
6735                 bnx2x_undi_unload(bp);
6736
6737         if (CHIP_REV_IS_FPGA(bp))
6738                 dev_err(&bp->pdev->dev, "FPGA detected\n");
6739
6740         if (BP_NOMCP(bp) && (func == 0))
6741                 dev_err(&bp->pdev->dev, "MCP disabled, "
6742                                         "must load devices in order!\n");
6743
6744         /* Set multi queue mode */
6745         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6746             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6747                 dev_err(&bp->pdev->dev, "Multi queue mode disabled since "
6748                                         "the requested int_mode is not MSI-X\n");
6749                 multi_mode = ETH_RSS_MODE_DISABLED;
6750         }
6751         bp->multi_mode = multi_mode;
6752         bp->int_mode = int_mode;
6753
6754         bp->dev->features |= NETIF_F_GRO;
6755
6756         /* Set TPA flags */
6757         if (disable_tpa) {
6758                 bp->flags &= ~TPA_ENABLE_FLAG;
6759                 bp->dev->features &= ~NETIF_F_LRO;
6760         } else {
6761                 bp->flags |= TPA_ENABLE_FLAG;
6762                 bp->dev->features |= NETIF_F_LRO;
6763         }
6764         bp->disable_tpa = disable_tpa;
6765
6766         if (CHIP_IS_E1(bp))
6767                 bp->dropless_fc = 0;
6768         else
6769                 bp->dropless_fc = dropless_fc;
6770
6771         bp->mrrs = mrrs;
6772
6773         bp->tx_ring_size = MAX_TX_AVAIL;
6774         bp->rx_ring_size = MAX_RX_AVAIL;
6775
6776         bp->rx_csum = 1;
6777
6778         /* make sure that the numbers are in the right granularity */
6779         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6780         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6781
6782         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6783         bp->current_interval = (poll ? poll : timer_interval);
6784
6785         init_timer(&bp->timer);
6786         bp->timer.expires = jiffies + bp->current_interval;
6787         bp->timer.data = (unsigned long) bp;
6788         bp->timer.function = bnx2x_timer;
6789
6790         return rc;
6791 }
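
/* Editorial sketch: the coalescing defaults above round down to a multiple
 * of (4 * BNX2X_BTR) using the integer round-down idiom:
 *
 *      rounded = (value / granularity) * granularity;
 *
 * e.g. with a granularity of 12 this maps 50 -> 48 and 25 -> 24 (a
 * hypothetical granularity, used purely to show the arithmetic).
 */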
6792
6793
6794 /****************************************************************************
6795 * General service functions
6796 ****************************************************************************/
6797
6798 /* called with rtnl_lock */
6799 static int bnx2x_open(struct net_device *dev)
6800 {
6801         struct bnx2x *bp = netdev_priv(dev);
6802
6803         netif_carrier_off(dev);
6804
6805         bnx2x_set_power_state(bp, PCI_D0);
6806
6807         if (!bnx2x_reset_is_done(bp)) {
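                /* A previous recovery has not completed: the do/while(0)
                 * below is a breakable block that tries once to become the
                 * recovery leader and finish the "process kill" flow before
                 * giving up with -EAGAIN.
                 */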
6808                 do {
6809                         /* Reset the MCP mailbox sequence if there is an
6810                          * ongoing recovery
6811                          */
6812                         bp->fw_seq = 0;
6813
6814                         /* If it's the first function to load and "reset
6815                          * done" is still not cleared, a previous recovery
6816                          * may be unfinished. We don't check the attention
6817                          * state: a "common" reset may have cleared it, but
6818                          * we shall proceed with "process kill" anyway.
6819                          */
6820                         if ((bnx2x_get_load_cnt(bp) == 0) &&
6821                                 bnx2x_trylock_hw_lock(bp,
6822                                 HW_LOCK_RESOURCE_RESERVED_08) &&
6823                                 (!bnx2x_leader_reset(bp))) {
6824                                 DP(NETIF_MSG_HW, "Recovered in open\n");
6825                                 break;
6826                         }
6827
6828                         bnx2x_set_power_state(bp, PCI_D3hot);
6829
6830                         printk(KERN_ERR"%s: Recovery flow hasn't been"
6831                         " properly completed yet. Try again later. If you"
6832                         " still see this message after a few retries then"
6833                         " a power cycle is required.\n", bp->dev->name);
6834
6835                         return -EAGAIN;
6836                 } while (0);
6837         }
6838
6839         bp->recovery_state = BNX2X_RECOVERY_DONE;
6840
6841         return bnx2x_nic_load(bp, LOAD_OPEN);
6842 }
6843
6844 /* called with rtnl_lock */
6845 static int bnx2x_close(struct net_device *dev)
6846 {
6847         struct bnx2x *bp = netdev_priv(dev);
6848
6849         /* Unload the driver, release IRQs */
6850         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6851         bnx2x_set_power_state(bp, PCI_D3hot);
6852
6853         return 0;
6854 }
6855
6856 /* called with netif_tx_lock from dev_mcast.c */
6857 void bnx2x_set_rx_mode(struct net_device *dev)
6858 {
6859         struct bnx2x *bp = netdev_priv(dev);
6860         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6861         int port = BP_PORT(bp);
6862
6863         if (bp->state != BNX2X_STATE_OPEN) {
6864                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6865                 return;
6866         }
6867
6868         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6869
6870         if (dev->flags & IFF_PROMISC)
6871                 rx_mode = BNX2X_RX_MODE_PROMISC;
6872
6873         else if ((dev->flags & IFF_ALLMULTI) ||
6874                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6875                   CHIP_IS_E1(bp)))
6876                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6877
6878         else { /* some multicasts */
6879                 if (CHIP_IS_E1(bp)) {
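                        /* E1 programs each multicast address into a MAC CAM
                         * entry and posts a SET_MAC ramrod; E1H (the else
                         * branch below) uses a CRC hash filter instead.
                         */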
6880                         int i, old, offset;
6881                         struct netdev_hw_addr *ha;
6882                         struct mac_configuration_cmd *config =
6883                                                 bnx2x_sp(bp, mcast_config);
6884
6885                         i = 0;
6886                         netdev_for_each_mc_addr(ha, dev) {
6887                                 config->config_table[i].
6888                                         cam_entry.msb_mac_addr =
6889                                         swab16(*(u16 *)&ha->addr[0]);
6890                                 config->config_table[i].
6891                                         cam_entry.middle_mac_addr =
6892                                         swab16(*(u16 *)&ha->addr[2]);
6893                                 config->config_table[i].
6894                                         cam_entry.lsb_mac_addr =
6895                                         swab16(*(u16 *)&ha->addr[4]);
6896                                 config->config_table[i].cam_entry.flags =
6897                                                         cpu_to_le16(port);
6898                                 config->config_table[i].
6899                                         target_table_entry.flags = 0;
6900                                 config->config_table[i].target_table_entry.
6901                                         clients_bit_vector =
6902                                                 cpu_to_le32(1 << BP_L_ID(bp));
6903                                 config->config_table[i].
6904                                         target_table_entry.vlan_id = 0;
6905
6906                                 DP(NETIF_MSG_IFUP,
6907                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6908                                    config->config_table[i].
6909                                                 cam_entry.msb_mac_addr,
6910                                    config->config_table[i].
6911                                                 cam_entry.middle_mac_addr,
6912                                    config->config_table[i].
6913                                                 cam_entry.lsb_mac_addr);
6914                                 i++;
6915                         }
6916                         old = config->hdr.length;
6917                         if (old > i) {
6918                                 for (; i < old; i++) {
6919                                         if (CAM_IS_INVALID(config->
6920                                                            config_table[i])) {
6921                                                 /* already invalidated */
6922                                                 break;
6923                                         }
6924                                         /* invalidate */
6925                                         CAM_INVALIDATE(config->
6926                                                        config_table[i]);
6927                                 }
6928                         }
6929
6930                         if (CHIP_REV_IS_SLOW(bp))
6931                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6932                         else
6933                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
6934
6935                         config->hdr.length = i;
6936                         config->hdr.offset = offset;
6937                         config->hdr.client_id = bp->fp->cl_id;
6938                         config->hdr.reserved1 = 0;
6939
6940                         bp->set_mac_pending++;
6941                         smp_wmb();
6942
6943                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6944                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6945                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6946                                       0);
6947                 } else { /* E1H */
6948                         /* Accept one or more multicasts */
6949                         struct netdev_hw_addr *ha;
6950                         u32 mc_filter[MC_HASH_SIZE];
6951                         u32 crc, bit, regidx;
6952                         int i;
6953
6954                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6955
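                        /* CRC32c each multicast MAC: the CRC's top 8 bits
                         * select one of 32 * MC_HASH_SIZE filter bits (bits
                         * 7:5 the register, bits 4:0 the bit within it).
                         */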
6956                         netdev_for_each_mc_addr(ha, dev) {
6957                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6958                                    ha->addr);
6959
6960                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6961                                 bit = (crc >> 24) & 0xff;
6962                                 regidx = bit >> 5;
6963                                 bit &= 0x1f;
6964                                 mc_filter[regidx] |= (1 << bit);
6965                         }
6966
6967                         for (i = 0; i < MC_HASH_SIZE; i++)
6968                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6969                                        mc_filter[i]);
6970                 }
6971         }
6972
6973         bp->rx_mode = rx_mode;
6974         bnx2x_set_storm_rx_mode(bp);
6975 }
6976
6977
6978 /* called with rtnl_lock */
6979 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6980                            int devad, u16 addr)
6981 {
6982         struct bnx2x *bp = netdev_priv(netdev);
6983         u16 value;
6984         int rc;
6985         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6986
6987         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6988            prtad, devad, addr);
6989
6990         if (prtad != bp->mdio.prtad) {
6991                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6992                    prtad, bp->mdio.prtad);
6993                 return -EINVAL;
6994         }
6995
6996         /* The HW expects different devad if CL22 is used */
6997         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6998
6999         bnx2x_acquire_phy_lock(bp);
7000         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
7001                              devad, addr, &value);
7002         bnx2x_release_phy_lock(bp);
7003         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7004
7005         if (!rc)
7006                 rc = value;
7007         return rc;
7008 }
7009
7010 /* called with rtnl_lock */
7011 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7012                             u16 addr, u16 value)
7013 {
7014         struct bnx2x *bp = netdev_priv(netdev);
7015         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7016         int rc;
7017
7018         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7019                            " value 0x%x\n", prtad, devad, addr, value);
7020
7021         if (prtad != bp->mdio.prtad) {
7022                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7023                    prtad, bp->mdio.prtad);
7024                 return -EINVAL;
7025         }
7026
7027         /* The HW expects different devad if CL22 is used */
7028         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7029
7030         bnx2x_acquire_phy_lock(bp);
7031         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7032                               devad, addr, value);
7033         bnx2x_release_phy_lock(bp);
7034         return rc;
7035 }
7036
7037 /* called with rtnl_lock */
7038 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7039 {
7040         struct bnx2x *bp = netdev_priv(dev);
7041         struct mii_ioctl_data *mdio = if_mii(ifr);
7042
7043         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7044            mdio->phy_id, mdio->reg_num, mdio->val_in);
7045
7046         if (!netif_running(dev))
7047                 return -EAGAIN;
7048
7049         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7050 }
7051
7052 #ifdef CONFIG_NET_POLL_CONTROLLER
7053 static void poll_bnx2x(struct net_device *dev)
7054 {
7055         struct bnx2x *bp = netdev_priv(dev);
7056
7057         disable_irq(bp->pdev->irq);
7058         bnx2x_interrupt(bp->pdev->irq, dev);
7059         enable_irq(bp->pdev->irq);
7060 }
7061 #endif
7062
7063 static const struct net_device_ops bnx2x_netdev_ops = {
7064         .ndo_open               = bnx2x_open,
7065         .ndo_stop               = bnx2x_close,
7066         .ndo_start_xmit         = bnx2x_start_xmit,
7067         .ndo_set_multicast_list = bnx2x_set_rx_mode,
7068         .ndo_set_mac_address    = bnx2x_change_mac_addr,
7069         .ndo_validate_addr      = eth_validate_addr,
7070         .ndo_do_ioctl           = bnx2x_ioctl,
7071         .ndo_change_mtu         = bnx2x_change_mtu,
7072         .ndo_tx_timeout         = bnx2x_tx_timeout,
7073 #ifdef BCM_VLAN
7074         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
7075 #endif
7076 #ifdef CONFIG_NET_POLL_CONTROLLER
7077         .ndo_poll_controller    = poll_bnx2x,
7078 #endif
7079 };
7080
7081 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7082                                     struct net_device *dev)
7083 {
7084         struct bnx2x *bp;
7085         int rc;
7086
7087         SET_NETDEV_DEV(dev, &pdev->dev);
7088         bp = netdev_priv(dev);
7089
7090         bp->dev = dev;
7091         bp->pdev = pdev;
7092         bp->flags = 0;
7093         bp->func = PCI_FUNC(pdev->devfn);
7094
7095         rc = pci_enable_device(pdev);
7096         if (rc) {
7097                 dev_err(&bp->pdev->dev,
7098                         "Cannot enable PCI device, aborting\n");
7099                 goto err_out;
7100         }
7101
7102         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7103                 dev_err(&bp->pdev->dev,
7104                         "Cannot find PCI device base address, aborting\n");
7105                 rc = -ENODEV;
7106                 goto err_out_disable;
7107         }
7108
7109         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7110                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7111                        " base address, aborting\n");
7112                 rc = -ENODEV;
7113                 goto err_out_disable;
7114         }
7115
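        /* Only the first caller to enable this (multi-function) device
         * requests the PCI regions and saves its config state.
         */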
7116         if (atomic_read(&pdev->enable_cnt) == 1) {
7117                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7118                 if (rc) {
7119                         dev_err(&bp->pdev->dev,
7120                                 "Cannot obtain PCI resources, aborting\n");
7121                         goto err_out_disable;
7122                 }
7123
7124                 pci_set_master(pdev);
7125                 pci_save_state(pdev);
7126         }
7127
7128         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7129         if (bp->pm_cap == 0) {
7130                 dev_err(&bp->pdev->dev,
7131                         "Cannot find power management capability, aborting\n");
7132                 rc = -EIO;
7133                 goto err_out_release;
7134         }
7135
7136         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7137         if (bp->pcie_cap == 0) {
7138                 dev_err(&bp->pdev->dev,
7139                         "Cannot find PCI Express capability, aborting\n");
7140                 rc = -EIO;
7141                 goto err_out_release;
7142         }
7143
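        /* Prefer 64-bit DMA (DAC); a 64-bit streaming mask without a
         * matching coherent mask is fatal, otherwise fall back to 32-bit.
         */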
7144         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7145                 bp->flags |= USING_DAC_FLAG;
7146                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7147                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7148                                " failed, aborting\n");
7149                         rc = -EIO;
7150                         goto err_out_release;
7151                 }
7152
7153         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7154                 dev_err(&bp->pdev->dev,
7155                         "System does not support DMA, aborting\n");
7156                 rc = -EIO;
7157                 goto err_out_release;
7158         }
7159
7160         dev->mem_start = pci_resource_start(pdev, 0);
7161         dev->base_addr = dev->mem_start;
7162         dev->mem_end = pci_resource_end(pdev, 0);
7163
7164         dev->irq = pdev->irq;
7165
7166         bp->regview = pci_ioremap_bar(pdev, 0);
7167         if (!bp->regview) {
7168                 dev_err(&bp->pdev->dev,
7169                         "Cannot map register space, aborting\n");
7170                 rc = -ENOMEM;
7171                 goto err_out_release;
7172         }
7173
7174         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7175                                         min_t(u64, BNX2X_DB_SIZE,
7176                                               pci_resource_len(pdev, 2)));
7177         if (!bp->doorbells) {
7178                 dev_err(&bp->pdev->dev,
7179                         "Cannot map doorbell space, aborting\n");
7180                 rc = -ENOMEM;
7181                 goto err_out_unmap;
7182         }
7183
7184         bnx2x_set_power_state(bp, PCI_D0);
7185
7186         /* clean indirect addresses */
7187         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7188                                PCICFG_VENDOR_ID_OFFSET);
7189         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7190         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7191         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7192         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7193
7194         /* Reset the load counter */
7195         bnx2x_clear_load_cnt(bp);
7196
7197         dev->watchdog_timeo = TX_TIMEOUT;
7198
7199         dev->netdev_ops = &bnx2x_netdev_ops;
7200         bnx2x_set_ethtool_ops(dev);
7201         dev->features |= NETIF_F_SG;
7202         dev->features |= NETIF_F_HW_CSUM;
7203         if (bp->flags & USING_DAC_FLAG)
7204                 dev->features |= NETIF_F_HIGHDMA;
7205         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7206         dev->features |= NETIF_F_TSO6;
7207 #ifdef BCM_VLAN
7208         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7209         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7210
7211         dev->vlan_features |= NETIF_F_SG;
7212         dev->vlan_features |= NETIF_F_HW_CSUM;
7213         if (bp->flags & USING_DAC_FLAG)
7214                 dev->vlan_features |= NETIF_F_HIGHDMA;
7215         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7216         dev->vlan_features |= NETIF_F_TSO6;
7217 #endif
7218
7219         /* get_port_hwinfo() will set prtad and mmds properly */
7220         bp->mdio.prtad = MDIO_PRTAD_NONE;
7221         bp->mdio.mmds = 0;
7222         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7223         bp->mdio.dev = dev;
7224         bp->mdio.mdio_read = bnx2x_mdio_read;
7225         bp->mdio.mdio_write = bnx2x_mdio_write;
7226
7227         return 0;
7228
7229 err_out_unmap:
7230         if (bp->regview) {
7231                 iounmap(bp->regview);
7232                 bp->regview = NULL;
7233         }
7234         if (bp->doorbells) {
7235                 iounmap(bp->doorbells);
7236                 bp->doorbells = NULL;
7237         }
7238
7239 err_out_release:
7240         if (atomic_read(&pdev->enable_cnt) == 1)
7241                 pci_release_regions(pdev);
7242
7243 err_out_disable:
7244         pci_disable_device(pdev);
7245         pci_set_drvdata(pdev, NULL);
7246
7247 err_out:
7248         return rc;
7249 }
7250
7251 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7252                                                  int *width, int *speed)
7253 {
7254         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7255
7256         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7257
7258         /* speed encoding: 1 = 2.5GHz, 2 = 5GHz */
7259         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7260 }
7261
7262 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
7263 {
7264         const struct firmware *firmware = bp->firmware;
7265         struct bnx2x_fw_file_hdr *fw_hdr;
7266         struct bnx2x_fw_file_section *sections;
7267         u32 offset, len, num_ops;
7268         u16 *ops_offsets;
7269         int i;
7270         const u8 *fw_ver;
7271
7272         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7273                 return -EINVAL;
7274
7275         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7276         sections = (struct bnx2x_fw_file_section *)fw_hdr;
7277
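        /* The file header is itself an array of {offset, len} section
         * descriptors, so it can be walked generically below.
         */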
7278         /* Make sure none of the offsets and sizes make us read beyond
7279          * the end of the firmware data */
7280         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7281                 offset = be32_to_cpu(sections[i].offset);
7282                 len = be32_to_cpu(sections[i].len);
7283                 if (offset + len > firmware->size) {
7284                         dev_err(&bp->pdev->dev,
7285                                 "Section %d length is out of bounds\n", i);
7286                         return -EINVAL;
7287                 }
7288         }
7289
7290         /* Likewise for the init_ops offsets */
7291         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7292         ops_offsets = (u16 *)(firmware->data + offset);
7293         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7294
7295         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7296                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7297                         dev_err(&bp->pdev->dev,
7298                                 "Section offset %d is out of bounds\n", i);
7299                         return -EINVAL;
7300                 }
7301         }
7302
7303         /* Check FW version */
7304         offset = be32_to_cpu(fw_hdr->fw_version.offset);
7305         fw_ver = firmware->data + offset;
7306         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7307             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7308             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7309             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7310                 dev_err(&bp->pdev->dev,
7311                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7312                        fw_ver[0], fw_ver[1], fw_ver[2],
7313                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7314                        BCM_5710_FW_MINOR_VERSION,
7315                        BCM_5710_FW_REVISION_VERSION,
7316                        BCM_5710_FW_ENGINEERING_VERSION);
7317                 return -EINVAL;
7318         }
7319
7320         return 0;
7321 }
7322
7323 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7324 {
7325         const __be32 *source = (const __be32 *)_source;
7326         u32 *target = (u32 *)_target;
7327         u32 i;
7328
7329         for (i = 0; i < n/4; i++)
7330                 target[i] = be32_to_cpu(source[i]);
7331 }
7332
7333 /*
7334    Ops array is stored in the following format:
7335    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7336  */
7337 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7338 {
7339         const __be32 *source = (const __be32 *)_source;
7340         struct raw_op *target = (struct raw_op *)_target;
7341         u32 i, j, tmp;
7342
7343         for (i = 0, j = 0; i < n/8; i++, j += 2) {
7344                 tmp = be32_to_cpu(source[j]);
7345                 target[i].op = (tmp >> 24) & 0xff;
7346                 target[i].offset = tmp & 0xffffff;
7347                 target[i].raw_data = be32_to_cpu(source[j + 1]);
7348         }
7349 }
7350
7351 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7352 {
7353         const __be16 *source = (const __be16 *)_source;
7354         u16 *target = (u16 *)_target;
7355         u32 i;
7356
7357         for (i = 0; i < n/2; i++)
7358                 target[i] = be16_to_cpu(source[i]);
7359 }
7360
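/* Allocate bp->arr and copy the matching firmware-file section into it,
 * converting from the file's big-endian layout via 'func'; jumps to 'lbl'
 * on allocation failure. Expects 'bp' and 'fw_hdr' in the caller's scope.
 */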
7361 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
7362 do {                                                                    \
7363         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
7364         bp->arr = kmalloc(len, GFP_KERNEL);                             \
7365         if (!bp->arr) {                                                 \
7366                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7367                 goto lbl;                                               \
7368         }                                                               \
7369         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
7370              (u8 *)bp->arr, len);                                       \
7371 } while (0)
7372
7373 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7374 {
7375         const char *fw_file_name;
7376         struct bnx2x_fw_file_hdr *fw_hdr;
7377         int rc;
7378
7379         if (CHIP_IS_E1(bp))
7380                 fw_file_name = FW_FILE_NAME_E1;
7381         else if (CHIP_IS_E1H(bp))
7382                 fw_file_name = FW_FILE_NAME_E1H;
7383         else {
7384                 dev_err(dev, "Unsupported chip revision\n");
7385                 return -EINVAL;
7386         }
7387
7388         dev_info(dev, "Loading %s\n", fw_file_name);
7389
7390         rc = request_firmware(&bp->firmware, fw_file_name, dev);
7391         if (rc) {
7392                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
7393                 goto request_firmware_exit;
7394         }
7395
7396         rc = bnx2x_check_firmware(bp);
7397         if (rc) {
7398                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
7399                 goto request_firmware_exit;
7400         }
7401
7402         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7403
7404         /* Initialize the pointers to the init arrays */
7405         /* Blob */
7406         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7407
7408         /* Opcodes */
7409         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7410
7411         /* Offsets */
7412         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7413                             be16_to_cpu_n);
7414
7415         /* STORMs firmware */
7416         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7417                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7418         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
7419                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7420         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7421                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7422         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
7423                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
7424         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7425                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7426         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
7427                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7428         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7429                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7430         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
7431                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
7432
7433         return 0;
7434
7435 init_offsets_alloc_err:
7436         kfree(bp->init_ops);
7437 init_ops_alloc_err:
7438         kfree(bp->init_data);
7439 request_firmware_exit:
7440         release_firmware(bp->firmware);
7441
7442         return rc;
7443 }
7444
7445
7446 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7447                                     const struct pci_device_id *ent)
7448 {
7449         struct net_device *dev = NULL;
7450         struct bnx2x *bp;
7451         int pcie_width, pcie_speed;
7452         int rc;
7453
7454         /* dev zeroed in alloc_etherdev_mq */
7455         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7456         if (!dev) {
7457                 dev_err(&pdev->dev, "Cannot allocate net device\n");
7458                 return -ENOMEM;
7459         }
7460
7461         bp = netdev_priv(dev);
7462         bp->msg_enable = debug;
7463
7464         pci_set_drvdata(pdev, dev);
7465
7466         rc = bnx2x_init_dev(pdev, dev);
7467         if (rc < 0) {
7468                 free_netdev(dev);
7469                 return rc;
7470         }
7471
7472         rc = bnx2x_init_bp(bp);
7473         if (rc)
7474                 goto init_one_exit;
7475
7476         /* Set init arrays */
7477         rc = bnx2x_init_firmware(bp, &pdev->dev);
7478         if (rc) {
7479                 dev_err(&pdev->dev, "Error loading firmware\n");
7480                 goto init_one_exit;
7481         }
7482
7483         rc = register_netdev(dev);
7484         if (rc) {
7485                 dev_err(&pdev->dev, "Cannot register net device\n");
7486                 goto init_one_exit;
7487         }
7488
7489         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7490         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7491                " IRQ %d, ", board_info[ent->driver_data].name,
7492                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7493                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7494                dev->base_addr, bp->pdev->irq);
7495         pr_cont("node addr %pM\n", dev->dev_addr);
7496
7497         return 0;
7498
7499 init_one_exit:
7500         if (bp->regview)
7501                 iounmap(bp->regview);
7502
7503         if (bp->doorbells)
7504                 iounmap(bp->doorbells);
7505
7506         free_netdev(dev);
7507
7508         if (atomic_read(&pdev->enable_cnt) == 1)
7509                 pci_release_regions(pdev);
7510
7511         pci_disable_device(pdev);
7512         pci_set_drvdata(pdev, NULL);
7513
7514         return rc;
7515 }
7516
7517 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7518 {
7519         struct net_device *dev = pci_get_drvdata(pdev);
7520         struct bnx2x *bp;
7521
7522         if (!dev) {
7523                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7524                 return;
7525         }
7526         bp = netdev_priv(dev);
7527
7528         unregister_netdev(dev);
7529
7530         /* Make sure RESET task is not scheduled before continuing */
7531         cancel_delayed_work_sync(&bp->reset_task);
7532
7533         kfree(bp->init_ops_offsets);
7534         kfree(bp->init_ops);
7535         kfree(bp->init_data);
7536         release_firmware(bp->firmware);
7537
7538         if (bp->regview)
7539                 iounmap(bp->regview);
7540
7541         if (bp->doorbells)
7542                 iounmap(bp->doorbells);
7543
7544         free_netdev(dev);
7545
7546         if (atomic_read(&pdev->enable_cnt) == 1)
7547                 pci_release_regions(pdev);
7548
7549         pci_disable_device(pdev);
7550         pci_set_drvdata(pdev, NULL);
7551 }
7552
7553 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7554 {
7555         int i;
7556
7557         bp->state = BNX2X_STATE_ERROR;
7558
7559         bp->rx_mode = BNX2X_RX_MODE_NONE;
7560
7561         bnx2x_netif_stop(bp, 0);
7562         netif_carrier_off(bp->dev);
7563
7564         del_timer_sync(&bp->timer);
7565         bp->stats_state = STATS_STATE_DISABLED;
7566         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7567
7568         /* Release IRQs */
7569         bnx2x_free_irq(bp, false);
7570
7571         if (CHIP_IS_E1(bp)) {
7572                 struct mac_configuration_cmd *config =
7573                                                 bnx2x_sp(bp, mcast_config);
7574
7575                 for (i = 0; i < config->hdr.length; i++)
7576                         CAM_INVALIDATE(config->config_table[i]);
7577         }
7578
7579         /* Free SKBs, SGEs, TPA pool and driver internals */
7580         bnx2x_free_skbs(bp);
7581         for_each_queue(bp, i)
7582                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7583         for_each_queue(bp, i)
7584                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7585         bnx2x_free_mem(bp);
7586
7587         bp->state = BNX2X_STATE_CLOSED;
7588
7589         return 0;
7590 }
7591
7592 static void bnx2x_eeh_recover(struct bnx2x *bp)
7593 {
7594         u32 val;
7595
7596         mutex_init(&bp->port.phy_mutex);
7597
7598         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7599         bp->link_params.shmem_base = bp->common.shmem_base;
7600         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7601
7602         if (!bp->common.shmem_base ||
7603             (bp->common.shmem_base < 0xA0000) ||
7604             (bp->common.shmem_base >= 0xC0000)) {
7605                 BNX2X_DEV_INFO("MCP not active\n");
7606                 bp->flags |= NO_MCP_FLAG;
7607                 return;
7608         }
7609
7610         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7611         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7612                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7613                 BNX2X_ERR("BAD MCP validity signature\n");
7614
7615         if (!BP_NOMCP(bp)) {
7616                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7617                               & DRV_MSG_SEQ_NUMBER_MASK);
7618                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7619         }
7620 }
7621
7622 /**
7623  * bnx2x_io_error_detected - called when PCI error is detected
7624  * @pdev: Pointer to PCI device
7625  * @state: The current PCI connection state
7626  *
7627  * This function is called after a PCI bus error affecting
7628  * this device has been detected.
7629  */
7630 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7631                                                 pci_channel_state_t state)
7632 {
7633         struct net_device *dev = pci_get_drvdata(pdev);
7634         struct bnx2x *bp = netdev_priv(dev);
7635
7636         rtnl_lock();
7637
7638         netif_device_detach(dev);
7639
7640         if (state == pci_channel_io_perm_failure) {
7641                 rtnl_unlock();
7642                 return PCI_ERS_RESULT_DISCONNECT;
7643         }
7644
7645         if (netif_running(dev))
7646                 bnx2x_eeh_nic_unload(bp);
7647
7648         pci_disable_device(pdev);
7649
7650         rtnl_unlock();
7651
7652         /* Request a slot reset */
7653         return PCI_ERS_RESULT_NEED_RESET;
7654 }
7655
7656 /**
7657  * bnx2x_io_slot_reset - called after the PCI bus has been reset
7658  * @pdev: Pointer to PCI device
7659  *
7660  * Restart the card from scratch, as if from a cold-boot.
7661  */
7662 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7663 {
7664         struct net_device *dev = pci_get_drvdata(pdev);
7665         struct bnx2x *bp = netdev_priv(dev);
7666
7667         rtnl_lock();
7668
7669         if (pci_enable_device(pdev)) {
7670                 dev_err(&pdev->dev,
7671                         "Cannot re-enable PCI device after reset\n");
7672                 rtnl_unlock();
7673                 return PCI_ERS_RESULT_DISCONNECT;
7674         }
7675
7676         pci_set_master(pdev);
7677         pci_restore_state(pdev);
7678
7679         if (netif_running(dev))
7680                 bnx2x_set_power_state(bp, PCI_D0);
7681
7682         rtnl_unlock();
7683
7684         return PCI_ERS_RESULT_RECOVERED;
7685 }
7686
7687 /**
7688  * bnx2x_io_resume - called when traffic can start flowing again
7689  * @pdev: Pointer to PCI device
7690  *
7691  * This callback is called when the error recovery driver tells us that
7692  * it's OK to resume normal operation.
7693  */
7694 static void bnx2x_io_resume(struct pci_dev *pdev)
7695 {
7696         struct net_device *dev = pci_get_drvdata(pdev);
7697         struct bnx2x *bp = netdev_priv(dev);
7698
7699         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7700                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
7701                 return;
7702         }
7703
7704         rtnl_lock();
7705
7706         bnx2x_eeh_recover(bp);
7707
7708         if (netif_running(dev))
7709                 bnx2x_nic_load(bp, LOAD_NORMAL);
7710
7711         netif_device_attach(dev);
7712
7713         rtnl_unlock();
7714 }
7715
7716 static struct pci_error_handlers bnx2x_err_handler = {
7717         .error_detected = bnx2x_io_error_detected,
7718         .slot_reset     = bnx2x_io_slot_reset,
7719         .resume         = bnx2x_io_resume,
7720 };
7721
7722 static struct pci_driver bnx2x_pci_driver = {
7723         .name        = DRV_MODULE_NAME,
7724         .id_table    = bnx2x_pci_tbl,
7725         .probe       = bnx2x_init_one,
7726         .remove      = __devexit_p(bnx2x_remove_one),
7727         .suspend     = bnx2x_suspend,
7728         .resume      = bnx2x_resume,
7729         .err_handler = &bnx2x_err_handler,
7730 };
7731
7732 static int __init bnx2x_init(void)
7733 {
7734         int ret;
7735
7736         pr_info("%s", version);
7737
7738         bnx2x_wq = create_singlethread_workqueue("bnx2x");
7739         if (bnx2x_wq == NULL) {
7740                 pr_err("Cannot create workqueue\n");
7741                 return -ENOMEM;
7742         }
7743
7744         ret = pci_register_driver(&bnx2x_pci_driver);
7745         if (ret) {
7746                 pr_err("Cannot register driver\n");
7747                 destroy_workqueue(bnx2x_wq);
7748         }
7749         return ret;
7750 }
7751
7752 static void __exit bnx2x_cleanup(void)
7753 {
7754         pci_unregister_driver(&bnx2x_pci_driver);
7755
7756         destroy_workqueue(bnx2x_wq);
7757 }
7758
7759 module_init(bnx2x_init);
7760 module_exit(bnx2x_cleanup);
7761
7762 #ifdef BCM_CNIC
7763
7764 /* count denotes the number of new completions we have seen */
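/* Credit back 'count' completed slow-path entries, then move queued CNIC
 * kwqes from the driver ring onto the SPQ while credit remains.
 */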
7765 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7766 {
7767         struct eth_spe *spe;
7768
7769 #ifdef BNX2X_STOP_ON_ERROR
7770         if (unlikely(bp->panic))
7771                 return;
7772 #endif
7773
7774         spin_lock_bh(&bp->spq_lock);
7775         bp->cnic_spq_pending -= count;
7776
7777         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7778              bp->cnic_spq_pending++) {
7779
7780                 if (!bp->cnic_kwq_pending)
7781                         break;
7782
7783                 spe = bnx2x_sp_get_next(bp);
7784                 *spe = *bp->cnic_kwq_cons;
7785
7786                 bp->cnic_kwq_pending--;
7787
7788                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7789                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7790
7791                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7792                         bp->cnic_kwq_cons = bp->cnic_kwq;
7793                 else
7794                         bp->cnic_kwq_cons++;
7795         }
7796         bnx2x_sp_prod_update(bp);
7797         spin_unlock_bh(&bp->spq_lock);
7798 }
7799
7800 static int bnx2x_cnic_sp_queue(struct net_device *dev,
7801                                struct kwqe_16 *kwqes[], u32 count)
7802 {
7803         struct bnx2x *bp = netdev_priv(dev);
7804         int i;
7805
7806 #ifdef BNX2X_STOP_ON_ERROR
7807         if (unlikely(bp->panic))
7808                 return -EIO;
7809 #endif
7810
7811         spin_lock_bh(&bp->spq_lock);
7812
7813         for (i = 0; i < count; i++) {
7814                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7815
7816                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7817                         break;
7818
7819                 *bp->cnic_kwq_prod = *spe;
7820
7821                 bp->cnic_kwq_pending++;
7822
7823                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7824                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
7825                    spe->data.mac_config_addr.hi,
7826                    spe->data.mac_config_addr.lo,
7827                    bp->cnic_kwq_pending);
7828
7829                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7830                         bp->cnic_kwq_prod = bp->cnic_kwq;
7831                 else
7832                         bp->cnic_kwq_prod++;
7833         }
7834
7835         spin_unlock_bh(&bp->spq_lock);
7836
7837         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7838                 bnx2x_cnic_sp_post(bp, 0);
7839
7840         return i;
7841 }
7842
7843 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7844 {
7845         struct cnic_ops *c_ops;
7846         int rc = 0;
7847
7848         mutex_lock(&bp->cnic_mutex);
7849         c_ops = bp->cnic_ops;
7850         if (c_ops)
7851                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7852         mutex_unlock(&bp->cnic_mutex);
7853
7854         return rc;
7855 }
7856
7857 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7858 {
7859         struct cnic_ops *c_ops;
7860         int rc = 0;
7861
7862         rcu_read_lock();
7863         c_ops = rcu_dereference(bp->cnic_ops);
7864         if (c_ops)
7865                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7866         rcu_read_unlock();
7867
7868         return rc;
7869 }
7870
7871 /*
7872  * for commands that have no data
7873  */
7874 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7875 {
7876         struct cnic_ctl_info ctl = {0};
7877
7878         ctl.cmd = cmd;
7879
7880         return bnx2x_cnic_ctl_send(bp, &ctl);
7881 }
7882
7883 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7884 {
7885         struct cnic_ctl_info ctl;
7886
7887         /* first we tell CNIC and only then we count this as a completion */
7888         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7889         ctl.data.comp.cid = cid;
7890
7891         bnx2x_cnic_ctl_send_bh(bp, &ctl);
7892         bnx2x_cnic_sp_post(bp, 1);
7893 }
7894
7895 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7896 {
7897         struct bnx2x *bp = netdev_priv(dev);
7898         int rc = 0;
7899
7900         switch (ctl->cmd) {
7901         case DRV_CTL_CTXTBL_WR_CMD: {
7902                 u32 index = ctl->data.io.offset;
7903                 dma_addr_t addr = ctl->data.io.dma_addr;
7904
7905                 bnx2x_ilt_wr(bp, index, addr);
7906                 break;
7907         }
7908
7909         case DRV_CTL_COMPLETION_CMD: {
7910                 int count = ctl->data.comp.comp_count;
7911
7912                 bnx2x_cnic_sp_post(bp, count);
7913                 break;
7914         }
7915
7916         /* rtnl_lock is held.  */
7917         case DRV_CTL_START_L2_CMD: {
7918                 u32 cli = ctl->data.ring.client_id;
7919
7920                 bp->rx_mode_cl_mask |= (1 << cli);
7921                 bnx2x_set_storm_rx_mode(bp);
7922                 break;
7923         }
7924
7925         /* rtnl_lock is held.  */
7926         case DRV_CTL_STOP_L2_CMD: {
7927                 u32 cli = ctl->data.ring.client_id;
7928
7929                 bp->rx_mode_cl_mask &= ~(1 << cli);
7930                 bnx2x_set_storm_rx_mode(bp);
7931                 break;
7932         }
7933
7934         default:
7935                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7936                 rc = -EINVAL;
7937         }
7938
7939         return rc;
7940 }
7941
7942 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7943 {
7944         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7945
7946         if (bp->flags & USING_MSIX_FLAG) {
7947                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7948                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7949                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7950         } else {
7951                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7952                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7953         }
7954         cp->irq_arr[0].status_blk = bp->cnic_sb;
7955         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7956         cp->irq_arr[1].status_blk = bp->def_status_blk;
7957         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7958
7959         cp->num_irq = 2;
7960 }
7961
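/* cnic attach entry point: allocate the kwqe ring, set up the CNIC status
 * block and iSCSI MAC, then publish 'ops' under RCU.
 */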
7962 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7963                                void *data)
7964 {
7965         struct bnx2x *bp = netdev_priv(dev);
7966         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7967
7968         if (ops == NULL)
7969                 return -EINVAL;
7970
7971         if (atomic_read(&bp->intr_sem) != 0)
7972                 return -EBUSY;
7973
7974         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7975         if (!bp->cnic_kwq)
7976                 return -ENOMEM;
7977
7978         bp->cnic_kwq_cons = bp->cnic_kwq;
7979         bp->cnic_kwq_prod = bp->cnic_kwq;
7980         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7981
7982         bp->cnic_spq_pending = 0;
7983         bp->cnic_kwq_pending = 0;
7984
7985         bp->cnic_data = data;
7986
7987         cp->num_irq = 0;
7988         cp->drv_state = CNIC_DRV_STATE_REGD;
7989
7990         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7991
7992         bnx2x_setup_cnic_irq_info(bp);
7993         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7994         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7995         rcu_assign_pointer(bp->cnic_ops, ops);
7996
7997         return 0;
7998 }
7999
8000 static int bnx2x_unregister_cnic(struct net_device *dev)
8001 {
8002         struct bnx2x *bp = netdev_priv(dev);
8003         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8004
8005         mutex_lock(&bp->cnic_mutex);
8006         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8007                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8008                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8009         }
8010         cp->drv_state = 0;
8011         rcu_assign_pointer(bp->cnic_ops, NULL);
8012         mutex_unlock(&bp->cnic_mutex);
8013         synchronize_rcu();
8014         kfree(bp->cnic_kwq);
8015         bp->cnic_kwq = NULL;
8016
8017         return 0;
8018 }
8019
8020 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
8021 {
8022         struct bnx2x *bp = netdev_priv(dev);
8023         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8024
8025         cp->drv_owner = THIS_MODULE;
8026         cp->chip_id = CHIP_ID(bp);
8027         cp->pdev = bp->pdev;
8028         cp->io_base = bp->regview;
8029         cp->io_base2 = bp->doorbells;
8030         cp->max_kwqe_pending = 8;
8031         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
8032         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
8033         cp->ctx_tbl_len = CNIC_ILT_LINES;
8034         cp->starting_cid = BCM_CNIC_CID_START;
8035         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
8036         cp->drv_ctl = bnx2x_drv_ctl;
8037         cp->drv_register_cnic = bnx2x_register_cnic;
8038         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
8039
8040         return cp;
8041 }
8042 EXPORT_SYMBOL(bnx2x_cnic_probe);
8043
8044 #endif /* BCM_CNIC */
8045