/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-8"
#define DRV_MODULE_RELDATE      "2010/04/01"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"
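/*
 * For reference: FW_FILE_VERSION stringifies the four version components
 * from bnx2x_fw_file_hdr.h, so the firmware file name takes the form
 * "bnx2x-e1-<major>.<minor>.<revision>.<engineering>.fw" (and "bnx2x-e1h-"
 * for E1H parts); the placeholders stand for whatever values that header
 * defines.
 */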

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
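
/*
 * The two helpers above tunnel register accesses through the PCI config
 * space GRC window: the target GRC address is programmed into
 * PCICFG_GRC_ADDRESS, the data moves through PCICFG_GRC_DATA, and the
 * window is then parked back at PCICFG_VENDOR_ID_OFFSET so later config
 * cycles do not land on an arbitrary GRC location.  Illustrative
 * (hypothetical) use before BAR-based access or DMAE is up:
 *
 *      u32 shmem_base = bnx2x_reg_rd_ind(bp, MISC_REG_SHARED_MEM_ADDR);
 *      bnx2x_reg_wr_ind(bp, some_grc_addr, some_val);
 */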

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
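
/*
 * Usage sketch for the write path (illustrative): "mapping" is a
 * dma_addr_t for a coherent buffer holding len32 little-endian dwords,
 * and dst_addr is a GRC byte address (the command stores it in dword
 * units, hence the dst_addr >> 2 above):
 *
 *      bnx2x_write_dmae(bp, mapping, dst_addr, len32);
 *
 * The call takes dmae_mutex and polls wb_comp for DMAE_COMP_VAL, so it
 * may sleep and must not be used from atomic context.
 */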

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
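
/*
 * Worked example of the chunking above, assuming DMAE_LEN32_WR_MAX were
 * 0x400: len = 0x900 dwords would be issued as commands of 0x400, 0x400
 * and 0x100 dwords, with offset advancing in bytes (chunk * 4) while len
 * counts dwords.
 */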

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}
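
/*
 * Summary of the mode selection above: MSI-X disables single-ISR mode
 * and the INTx line, MSI keeps single-ISR mode set and routes through
 * the MSI/MSI-X enable, and the INTx fallback first enables every
 * source, writes, then clears the MSI/MSI-X bit and writes again.  On
 * E1H the leading/trailing edge masks are then opened per function
 * (0xee0f plus this function's vnic bit in multi-function mode, or
 * 0xffff otherwise).
 */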

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was successfully acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                /* bool function: report failure rather than -EINVAL */
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}
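
/*
 * The HW lock is a set-on-write register pair: writing the resource bit
 * to hw_lock_control_reg + 4 requests the lock, and the read-back of
 * hw_lock_control_reg shows whether this function now owns it.  An
 * illustrative (hypothetical) poll loop around the helper:
 *
 *      while (!bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_SPIO))
 *              msleep(5);
 */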

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
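
/*
 * For reference, the BD chain walked above is: a start BD (unmapped with
 * dma_unmap_single()), a parse BD and an optional TSO split-header BD
 * (neither carries a mapping, so they are only skipped), then one data
 * BD per fragment (each unmapped with dma_unmap_page()).  nbd from the
 * start BD counts the whole chain, which is why it is decremented once
 * up front and again for each skipped or freed BD.
 */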

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
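
/*
 * Worked example (hypothetical sizes): with tx_ring_size = 1024 and a
 * 4-page BD chain (NUM_TX_RINGS = 4), prod = 100 and cons = 36 give
 * used = 64 + 4 = 68, so 1024 - 68 = 956 BDs are reported free; the
 * "next page" entries are treated as permanently used so callers stay
 * conservative across page boundaries.
 */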

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent reenabling the queue
                 * while it's empty. This could have happened if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}
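
/*
 * Each SGE entry is backed by one compound allocation of order
 * PAGES_PER_SGE_SHIFT, mapped as a single SGE_PAGE_SIZE * PAGES_PER_SGE
 * DMA region and advertised to the chip as a 64-bit address split
 * across addr_hi/addr_lo.
 */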

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* Note that we are not allocating a new skb, just moving one from
 * cons to prod; and since we are not creating a new mapping, there
 * is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in each page: these are the
           indices that correspond to the "next" element, hence they
           will never be indicated and should be removed from the
           calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
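
/*
 * Mask semantics, for reference: each u64 of sge_mask covers
 * RX_SGE_MASK_ELEM_SZ ring entries, and a set bit marks an entry that
 * has not been consumed yet.  bnx2x_update_sge_prod() clears bits as
 * the chip reports pages and advances rx_sge_prod only over elements
 * that have reached zero; the two trailing "next page" slots of every
 * ring page are pre-cleared because the chip never reports them and
 * they would otherwise stall the producer.
 */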

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
1398                 dma_unmap_page(&bp->pdev->dev,
1399                                dma_unmap_addr(&old_rx_pg, mapping),
1400                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1401
1402                 /* Add one frag and update the appropriate fields in the skb */
1403                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1404
1405                 skb->data_len += frag_len;
1406                 skb->truesize += frag_len;
1407                 skb->len += frag_len;
1408
1409                 frag_size -= frag_len;
1410         }
1411
1412         return 0;
1413 }
1414
1415 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1416                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1417                            u16 cqe_idx)
1418 {
1419         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1420         struct sk_buff *skb = rx_buf->skb;
1421         /* alloc new skb */
1422         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1423
1424         /* Unmap skb in the pool anyway, as we are going to change
1425            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1426            fails. */
1427         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1428                          bp->rx_buf_size, DMA_FROM_DEVICE);
1429
1430         if (likely(new_skb)) {
1431                 /* fix ip xsum and give it to the stack */
1432                 /* (no need to map the new skb) */
1433 #ifdef BCM_VLAN
1434                 int is_vlan_cqe =
1435                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1436                          PARSING_FLAGS_VLAN);
1437                 int is_not_hwaccel_vlan_cqe =
1438                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1439 #endif
1440
1441                 prefetch(skb);
1442                 prefetch(((char *)(skb)) + 128);
1443
1444 #ifdef BNX2X_STOP_ON_ERROR
1445                 if (pad + len > bp->rx_buf_size) {
1446                         BNX2X_ERR("skb_put is about to fail...  "
1447                                   "pad %d  len %d  rx_buf_size %d\n",
1448                                   pad, len, bp->rx_buf_size);
1449                         bnx2x_panic();
1450                         return;
1451                 }
1452 #endif
1453
1454                 skb_reserve(skb, pad);
1455                 skb_put(skb, len);
1456
1457                 skb->protocol = eth_type_trans(skb, bp->dev);
1458                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1459
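                     /* The aggregated frame's IP header no longer matches its
                        original checksum (e.g. the total-length field now
                        covers the whole aggregation), so recompute the header
                        checksum before handing the skb to the stack */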
1460                 {
1461                         struct iphdr *iph;
1462
1463                         iph = (struct iphdr *)skb->data;
1464 #ifdef BCM_VLAN
1465                         /* If there is no Rx VLAN offloading -
1466                            take the VLAN tag into account */
1467                         if (unlikely(is_not_hwaccel_vlan_cqe))
1468                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1469 #endif
1470                         iph->check = 0;
1471                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1472                 }
1473
1474                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1475                                          &cqe->fast_path_cqe, cqe_idx)) {
1476 #ifdef BCM_VLAN
1477                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1478                             (!is_not_hwaccel_vlan_cqe))
1479                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1480                                                  le16_to_cpu(cqe->fast_path_cqe.
1481                                                              vlan_tag), skb);
1482                         else
1483 #endif
1484                                 napi_gro_receive(&fp->napi, skb);
1485                 } else {
1486                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1487                            " - dropping packet!\n");
1488                         dev_kfree_skb(skb);
1489                 }
1490
1491
1492                 /* put new skb in bin */
1493                 fp->tpa_pool[queue].skb = new_skb;
1494
1495         } else {
1496                 /* else drop the packet and keep the buffer in the bin */
1497                 DP(NETIF_MSG_RX_STATUS,
1498                    "Failed to allocate new skb - dropping packet!\n");
1499                 fp->eth_q_stats.rx_skb_alloc_failed++;
1500         }
1501
1502         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1503 }
1504
1505 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1506                                         struct bnx2x_fastpath *fp,
1507                                         u16 bd_prod, u16 rx_comp_prod,
1508                                         u16 rx_sge_prod)
1509 {
1510         struct ustorm_eth_rx_producers rx_prods = {0};
1511         int i;
1512
1513         /* Update producers */
1514         rx_prods.bd_prod = bd_prod;
1515         rx_prods.cqe_prod = rx_comp_prod;
1516         rx_prods.sge_prod = rx_sge_prod;
1517
1518         /*
1519          * Make sure that the BD and SGE data is updated before updating the
1520          * producers since FW might read the BD/SGE right after the producer
1521          * is updated.
1522          * This is only applicable for weak-ordered memory model archs such
1523          * as IA-64. The following barrier is also mandatory since the FW
1524          * assumes BDs must have buffers.
1525          */
1526         wmb();
1527
1528         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1529                 REG_WR(bp, BAR_USTRORM_INTMEM +
1530                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1531                        ((u32 *)&rx_prods)[i]);
1532
1533         mmiowb(); /* keep prod updates ordered */
1534
1535         DP(NETIF_MSG_RX_STATUS,
1536            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1537            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1538 }
1539
1540 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1541 {
1542         struct bnx2x *bp = fp->bp;
1543         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1544         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1545         int rx_pkt = 0;
1546
1547 #ifdef BNX2X_STOP_ON_ERROR
1548         if (unlikely(bp->panic))
1549                 return 0;
1550 #endif
1551
1552         /* The CQ "next element" has the same size as a regular element,
1553            so simply incrementing past it below is safe */
1554         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1555         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1556                 hw_comp_cons++;
1557
1558         bd_cons = fp->rx_bd_cons;
1559         bd_prod = fp->rx_bd_prod;
1560         bd_prod_fw = bd_prod;
1561         sw_comp_cons = fp->rx_comp_cons;
1562         sw_comp_prod = fp->rx_comp_prod;
1563
1564         /* Memory barrier necessary as speculative reads of the rx
1565          * buffer can be ahead of the index in the status block
1566          */
1567         rmb();
1568
1569         DP(NETIF_MSG_RX_STATUS,
1570            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1571            fp->index, hw_comp_cons, sw_comp_cons);
1572
1573         while (sw_comp_cons != hw_comp_cons) {
1574                 struct sw_rx_bd *rx_buf = NULL;
1575                 struct sk_buff *skb;
1576                 union eth_rx_cqe *cqe;
1577                 u8 cqe_fp_flags;
1578                 u16 len, pad;
1579
1580                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1581                 bd_prod = RX_BD(bd_prod);
1582                 bd_cons = RX_BD(bd_cons);
1583
1584                 /* Prefetch the page containing the BD descriptor
1585                    at the producer's index; it will be needed when a new
1586                    skb is allocated */
1587                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1588                                              (&fp->rx_desc_ring[bd_prod])) -
1589                                   PAGE_SIZE + 1));
1590
1591                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1592                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1593
1594                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1595                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1596                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1597                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1598                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1599                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1600
1601                 /* is this a slowpath msg? */
1602                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1603                         bnx2x_sp_event(fp, cqe);
1604                         goto next_cqe;
1605
1606                 /* this is an rx packet */
1607                 } else {
1608                         rx_buf = &fp->rx_buf_ring[bd_cons];
1609                         skb = rx_buf->skb;
1610                         prefetch(skb);
1611                         prefetch((u8 *)skb + 256);
1612                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1613                         pad = cqe->fast_path_cqe.placement_offset;
1614
1615                         /* If the CQE is marked as both TPA_START and
1616                            TPA_END, it is a non-TPA CQE */
1617                         if ((!fp->disable_tpa) &&
1618                             (TPA_TYPE(cqe_fp_flags) !=
1619                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1620                                 u16 queue = cqe->fast_path_cqe.queue_index;
1621
1622                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1623                                         DP(NETIF_MSG_RX_STATUS,
1624                                            "calling tpa_start on queue %d\n",
1625                                            queue);
1626
1627                                         bnx2x_tpa_start(fp, queue, skb,
1628                                                         bd_cons, bd_prod);
1629                                         goto next_rx;
1630                                 }
1631
1632                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1633                                         DP(NETIF_MSG_RX_STATUS,
1634                                            "calling tpa_stop on queue %d\n",
1635                                            queue);
1636
1637                                         if (!BNX2X_RX_SUM_FIX(cqe))
1638                                                 BNX2X_ERR("STOP on non-TCP "
1639                                                           "data\n");
1640
1641                                         /* This is the size of the linear
1642                                            data on this skb */
1643                                         len = le16_to_cpu(cqe->fast_path_cqe.
1644                                                                 len_on_bd);
1645                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1646                                                     len, cqe, comp_ring_cons);
1647 #ifdef BNX2X_STOP_ON_ERROR
1648                                         if (bp->panic)
1649                                                 return 0;
1650 #endif
1651
1652                                         bnx2x_update_sge_prod(fp,
1653                                                         &cqe->fast_path_cqe);
1654                                         goto next_cqe;
1655                                 }
1656                         }
1657
1658                         dma_sync_single_for_device(&bp->pdev->dev,
1659                                         dma_unmap_addr(rx_buf, mapping),
1660                                                    pad + RX_COPY_THRESH,
1661                                                    DMA_FROM_DEVICE);
1662                         prefetch(skb);
1663                         prefetch(((char *)(skb)) + 128);
1664
1665                         /* is this an error packet? */
1666                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1667                                 DP(NETIF_MSG_RX_ERR,
1668                                    "ERROR  flags %x  rx packet %u\n",
1669                                    cqe_fp_flags, sw_comp_cons);
1670                                 fp->eth_q_stats.rx_err_discard_pkt++;
1671                                 goto reuse_rx;
1672                         }
1673
1674                         /* Since we don't have a jumbo ring,
1675                          * copy small packets if mtu > 1500
1676                          */
1677                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1678                             (len <= RX_COPY_THRESH)) {
1679                                 struct sk_buff *new_skb;
1680
1681                                 new_skb = netdev_alloc_skb(bp->dev,
1682                                                            len + pad);
1683                                 if (new_skb == NULL) {
1684                                         DP(NETIF_MSG_RX_ERR,
1685                                            "ERROR  packet dropped "
1686                                            "because of alloc failure\n");
1687                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1688                                         goto reuse_rx;
1689                                 }
1690
1691                                 /* aligned copy */
1692                                 skb_copy_from_linear_data_offset(skb, pad,
1693                                                     new_skb->data + pad, len);
1694                                 skb_reserve(new_skb, pad);
1695                                 skb_put(new_skb, len);
1696
1697                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1698
1699                                 skb = new_skb;
1700
1701                         } else
1702                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1703                                 dma_unmap_single(&bp->pdev->dev,
1704                                         dma_unmap_addr(rx_buf, mapping),
1705                                                  bp->rx_buf_size,
1706                                                  DMA_FROM_DEVICE);
1707                                 skb_reserve(skb, pad);
1708                                 skb_put(skb, len);
1709
1710                         } else {
1711                                 DP(NETIF_MSG_RX_ERR,
1712                                    "ERROR  packet dropped because "
1713                                    "of alloc failure\n");
1714                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1715 reuse_rx:
1716                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1717                                 goto next_rx;
1718                         }
1719
1720                         skb->protocol = eth_type_trans(skb, bp->dev);
1721
1722                         skb->ip_summed = CHECKSUM_NONE;
1723                         if (bp->rx_csum) {
1724                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1725                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1726                                 else
1727                                         fp->eth_q_stats.hw_csum_err++;
1728                         }
1729                 }
1730
1731                 skb_record_rx_queue(skb, fp->index);
1732
1733 #ifdef BCM_VLAN
1734                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1735                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1736                      PARSING_FLAGS_VLAN))
1737                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1738                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1739                 else
1740 #endif
1741                         napi_gro_receive(&fp->napi, skb);
1742
1743
1744 next_rx:
1745                 rx_buf->skb = NULL;
1746
1747                 bd_cons = NEXT_RX_IDX(bd_cons);
1748                 bd_prod = NEXT_RX_IDX(bd_prod);
1749                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1750                 rx_pkt++;
1751 next_cqe:
1752                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1753                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1754
1755                 if (rx_pkt == budget)
1756                         break;
1757         } /* while */
1758
1759         fp->rx_bd_cons = bd_cons;
1760         fp->rx_bd_prod = bd_prod_fw;
1761         fp->rx_comp_cons = sw_comp_cons;
1762         fp->rx_comp_prod = sw_comp_prod;
1763
1764         /* Update producers */
1765         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1766                              fp->rx_sge_prod);
1767
1768         fp->rx_pkt += rx_pkt;
1769         fp->rx_calls++;
1770
1771         return rx_pkt;
1772 }
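/*
 * Illustrative sketch only (the driver's actual NAPI poll routine appears
 * later in this file and also handles Tx completions): a poll handler
 * drives bnx2x_rx_int() roughly like
 *
 *      work_done = bnx2x_rx_int(fp, budget);
 *      if (work_done < budget) {
 *              napi_complete(napi);
 *              bnx2x_ack_sb(bp, fp->sb_id, ...);
 *      }
 */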
1773
1774 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1775 {
1776         struct bnx2x_fastpath *fp = fp_cookie;
1777         struct bnx2x *bp = fp->bp;
1778
1779         /* Return here if interrupt is disabled */
1780         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1781                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1782                 return IRQ_HANDLED;
1783         }
1784
1785         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1786            fp->index, fp->sb_id);
1787         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1788
1789 #ifdef BNX2X_STOP_ON_ERROR
1790         if (unlikely(bp->panic))
1791                 return IRQ_HANDLED;
1792 #endif
1793
1794         /* Handle Rx and Tx according to MSI-X vector */
1795         prefetch(fp->rx_cons_sb);
1796         prefetch(fp->tx_cons_sb);
1797         prefetch(&fp->status_blk->u_status_block.status_block_index);
1798         prefetch(&fp->status_blk->c_status_block.status_block_index);
1799         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1800
1801         return IRQ_HANDLED;
1802 }
1803
1804 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1805 {
1806         struct bnx2x *bp = netdev_priv(dev_instance);
1807         u16 status = bnx2x_ack_int(bp);
1808         u16 mask;
1809         int i;
1810
1811         /* Return here if interrupt is shared and it's not for us */
1812         if (unlikely(status == 0)) {
1813                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1814                 return IRQ_NONE;
1815         }
1816         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1817
1818         /* Return here if interrupt is disabled */
1819         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1820                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1821                 return IRQ_HANDLED;
1822         }
1823
1824 #ifdef BNX2X_STOP_ON_ERROR
1825         if (unlikely(bp->panic))
1826                 return IRQ_HANDLED;
1827 #endif
1828
1829         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1830                 struct bnx2x_fastpath *fp = &bp->fp[i];
1831
1832                 mask = 0x2 << fp->sb_id;
1833                 if (status & mask) {
1834                         /* Handle Rx and Tx according to SB id */
1835                         prefetch(fp->rx_cons_sb);
1836                         prefetch(&fp->status_blk->u_status_block.
1837                                                 status_block_index);
1838                         prefetch(fp->tx_cons_sb);
1839                         prefetch(&fp->status_blk->c_status_block.
1840                                                 status_block_index);
1841                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1842                         status &= ~mask;
1843                 }
1844         }
1845
1846 #ifdef BCM_CNIC
1847         mask = 0x2 << CNIC_SB_ID(bp);
1848         if (status & (mask | 0x1)) {
1849                 struct cnic_ops *c_ops = NULL;
1850
1851                 rcu_read_lock();
1852                 c_ops = rcu_dereference(bp->cnic_ops);
1853                 if (c_ops)
1854                         c_ops->cnic_handler(bp->cnic_data, NULL);
1855                 rcu_read_unlock();
1856
1857                 status &= ~mask;
1858         }
1859 #endif
1860
1861         if (unlikely(status & 0x1)) {
1862                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1863
1864                 status &= ~0x1;
1865                 if (!status)
1866                         return IRQ_HANDLED;
1867         }
1868
1869         if (status)
1870                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1871                    status);
1872
1873         return IRQ_HANDLED;
1874 }
1875
1876 /* end of fast path */
1877
1878 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1879
1880 /* Link */
1881
1882 /*
1883  * General service functions
1884  */
1885
1886 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1887 {
1888         u32 lock_status;
1889         u32 resource_bit = (1 << resource);
1890         int func = BP_FUNC(bp);
1891         u32 hw_lock_control_reg;
1892         int cnt;
1893
1894         /* Validating that the resource is within range */
1895         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1896                 DP(NETIF_MSG_HW,
1897                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1898                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1899                 return -EINVAL;
1900         }
1901
1902         if (func <= 5) {
1903                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1904         } else {
1905                 hw_lock_control_reg =
1906                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1907         }
1908
1909         /* Validating that the resource is not already taken */
1910         lock_status = REG_RD(bp, hw_lock_control_reg);
1911         if (lock_status & resource_bit) {
1912                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1913                    lock_status, resource_bit);
1914                 return -EEXIST;
1915         }
1916
1917         /* Try for 5 seconds, polling every 5ms */
1918         for (cnt = 0; cnt < 1000; cnt++) {
1919                 /* Try to acquire the lock */
1920                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1921                 lock_status = REG_RD(bp, hw_lock_control_reg);
1922                 if (lock_status & resource_bit)
1923                         return 0;
1924
1925                 msleep(5);
1926         }
1927         DP(NETIF_MSG_HW, "Timeout\n");
1928         return -EAGAIN;
1929 }
1930
1931 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1932 {
1933         u32 lock_status;
1934         u32 resource_bit = (1 << resource);
1935         int func = BP_FUNC(bp);
1936         u32 hw_lock_control_reg;
1937
1938         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1939
1940         /* Validating that the resource is within range */
1941         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1942                 DP(NETIF_MSG_HW,
1943                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1944                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1945                 return -EINVAL;
1946         }
1947
1948         if (func <= 5) {
1949                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1950         } else {
1951                 hw_lock_control_reg =
1952                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1953         }
1954
1955         /* Validating that the resource is currently taken */
1956         lock_status = REG_RD(bp, hw_lock_control_reg);
1957         if (!(lock_status & resource_bit)) {
1958                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1959                    lock_status, resource_bit);
1960                 return -EFAULT;
1961         }
1962
1963         REG_WR(bp, hw_lock_control_reg, resource_bit);
1964         return 0;
1965 }
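/*
 * The two helpers above always bracket accesses to registers shared with
 * other driver instances, e.g. (as in bnx2x_set_gpio() below):
 *
 *      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      gpio_reg = REG_RD(bp, MISC_REG_GPIO);
 *      ...read-modify-write...
 *      REG_WR(bp, MISC_REG_GPIO, gpio_reg);
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */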
1966
1967 /* HW Lock for shared dual port PHYs */
1968 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1969 {
1970         mutex_lock(&bp->port.phy_mutex);
1971
1972         if (bp->port.need_hw_lock)
1973                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1974 }
1975
1976 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1977 {
1978         if (bp->port.need_hw_lock)
1979                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1980
1981         mutex_unlock(&bp->port.phy_mutex);
1982 }
1983
1984 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1985 {
1986         /* The GPIO should be swapped if swap register is set and active */
1987         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1988                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
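             /* (when both NIG registers read non-zero the && evaluates to 1
                and the XOR flips port 0 <-> 1; otherwise gpio_port == port) */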
1989         int gpio_shift = gpio_num +
1990                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1991         u32 gpio_mask = (1 << gpio_shift);
1992         u32 gpio_reg;
1993         int value;
1994
1995         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1996                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1997                 return -EINVAL;
1998         }
1999
2000         /* read GPIO value */
2001         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2002
2003         /* get the requested pin value */
2004         if ((gpio_reg & gpio_mask) == gpio_mask)
2005                 value = 1;
2006         else
2007                 value = 0;
2008
2009         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2010
2011         return value;
2012 }
2013
2014 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2015 {
2016         /* The GPIO should be swapped if swap register is set and active */
2017         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2018                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2019         int gpio_shift = gpio_num +
2020                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2021         u32 gpio_mask = (1 << gpio_shift);
2022         u32 gpio_reg;
2023
2024         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2025                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2026                 return -EINVAL;
2027         }
2028
2029         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030         /* read GPIO and mask except the float bits */
2031         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2032
2033         switch (mode) {
2034         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2035                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2036                    gpio_num, gpio_shift);
2037                 /* clear FLOAT and set CLR */
2038                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2039                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2040                 break;
2041
2042         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2043                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2044                    gpio_num, gpio_shift);
2045                 /* clear FLOAT and set SET */
2046                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2048                 break;
2049
2050         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2051                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2052                    gpio_num, gpio_shift);
2053                 /* set FLOAT */
2054                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055                 break;
2056
2057         default:
2058                 break;
2059         }
2060
2061         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2062         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2063
2064         return 0;
2065 }
2066
2067 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2068 {
2069         /* The GPIO should be swapped if swap register is set and active */
2070         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2071                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2072         int gpio_shift = gpio_num +
2073                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2074         u32 gpio_mask = (1 << gpio_shift);
2075         u32 gpio_reg;
2076
2077         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2078                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2079                 return -EINVAL;
2080         }
2081
2082         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2083         /* read GPIO int */
2084         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2085
2086         switch (mode) {
2087         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2088                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2089                                    "output low\n", gpio_num, gpio_shift);
2090                 /* clear SET and set CLR */
2091                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2092                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2093                 break;
2094
2095         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2096                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2097                                    "output high\n", gpio_num, gpio_shift);
2098                 /* clear CLR and set SET */
2099                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2100                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2101                 break;
2102
2103         default:
2104                 break;
2105         }
2106
2107         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2108         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2109
2110         return 0;
2111 }
2112
2113 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2114 {
2115         u32 spio_mask = (1 << spio_num);
2116         u32 spio_reg;
2117
2118         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2119             (spio_num > MISC_REGISTERS_SPIO_7)) {
2120                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2121                 return -EINVAL;
2122         }
2123
2124         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2125         /* read SPIO and mask except the float bits */
2126         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2127
2128         switch (mode) {
2129         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2130                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2131                 /* clear FLOAT and set CLR */
2132                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2133                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2134                 break;
2135
2136         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2137                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2138                 /* clear FLOAT and set SET */
2139                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2140                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2141                 break;
2142
2143         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2144                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2145                 /* set FLOAT */
2146                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2147                 break;
2148
2149         default:
2150                 break;
2151         }
2152
2153         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2154         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2155
2156         return 0;
2157 }
2158
2159 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2160 {
2161         switch (bp->link_vars.ieee_fc &
2162                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2163         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2164                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2165                                           ADVERTISED_Pause);
2166                 break;
2167
2168         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2169                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2170                                          ADVERTISED_Pause);
2171                 break;
2172
2173         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2174                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2175                 break;
2176
2177         default:
2178                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2179                                           ADVERTISED_Pause);
2180                 break;
2181         }
2182 }
2183
2184 static void bnx2x_link_report(struct bnx2x *bp)
2185 {
2186         if (bp->flags & MF_FUNC_DIS) {
2187                 netif_carrier_off(bp->dev);
2188                 netdev_err(bp->dev, "NIC Link is Down\n");
2189                 return;
2190         }
2191
2192         if (bp->link_vars.link_up) {
2193                 u16 line_speed;
2194
2195                 if (bp->state == BNX2X_STATE_OPEN)
2196                         netif_carrier_on(bp->dev);
2197                 netdev_info(bp->dev, "NIC Link is Up, ");
2198
2199                 line_speed = bp->link_vars.line_speed;
2200                 if (IS_E1HMF(bp)) {
2201                         u16 vn_max_rate;
2202
2203                         vn_max_rate =
2204                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2205                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2206                         if (vn_max_rate < line_speed)
2207                                 line_speed = vn_max_rate;
2208                 }
2209                 pr_cont("%d Mbps ", line_speed);
2210
2211                 if (bp->link_vars.duplex == DUPLEX_FULL)
2212                         pr_cont("full duplex");
2213                 else
2214                         pr_cont("half duplex");
2215
2216                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2217                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2218                                 pr_cont(", receive ");
2219                                 if (bp->link_vars.flow_ctrl &
2220                                     BNX2X_FLOW_CTRL_TX)
2221                                         pr_cont("& transmit ");
2222                         } else {
2223                                 pr_cont(", transmit ");
2224                         }
2225                         pr_cont("flow control ON");
2226                 }
2227                 pr_cont("\n");
2228
2229         } else { /* link_down */
2230                 netif_carrier_off(bp->dev);
2231                 netdev_err(bp->dev, "NIC Link is Down\n");
2232         }
2233 }
2234
2235 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2236 {
2237         if (!BP_NOMCP(bp)) {
2238                 u8 rc;
2239
2240                 /* Initialize link parameters structure variables */
2241                 /* It is recommended to turn off RX FC for jumbo frames
2242                    for better performance */
2243                 if (bp->dev->mtu > 5000)
2244                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2245                 else
2246                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2247
2248                 bnx2x_acquire_phy_lock(bp);
2249
2250                 if (load_mode == LOAD_DIAG)
2251                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2252
2253                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2254
2255                 bnx2x_release_phy_lock(bp);
2256
2257                 bnx2x_calc_fc_adv(bp);
2258
2259                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2260                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2261                         bnx2x_link_report(bp);
2262                 }
2263
2264                 return rc;
2265         }
2266         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2267         return -EINVAL;
2268 }
2269
2270 static void bnx2x_link_set(struct bnx2x *bp)
2271 {
2272         if (!BP_NOMCP(bp)) {
2273                 bnx2x_acquire_phy_lock(bp);
2274                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2275                 bnx2x_release_phy_lock(bp);
2276
2277                 bnx2x_calc_fc_adv(bp);
2278         } else
2279                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2280 }
2281
2282 static void bnx2x__link_reset(struct bnx2x *bp)
2283 {
2284         if (!BP_NOMCP(bp)) {
2285                 bnx2x_acquire_phy_lock(bp);
2286                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2287                 bnx2x_release_phy_lock(bp);
2288         } else
2289                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2290 }
2291
2292 static u8 bnx2x_link_test(struct bnx2x *bp)
2293 {
2294         u8 rc;
2295
2296         bnx2x_acquire_phy_lock(bp);
2297         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2298         bnx2x_release_phy_lock(bp);
2299
2300         return rc;
2301 }
2302
2303 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2304 {
2305         u32 r_param = bp->link_vars.line_speed / 8;
2306         u32 fair_periodic_timeout_usec;
2307         u32 t_fair;
2308
2309         memset(&(bp->cmng.rs_vars), 0,
2310                sizeof(struct rate_shaping_vars_per_port));
2311         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2312
2313         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2314         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2315
2316         /* this is the threshold below which no timer arming will occur.
2317            The 1.25 coefficient makes the threshold a little bigger than
2318            the real time, to compensate for timer inaccuracy */
2319         bp->cmng.rs_vars.rs_threshold =
2320                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
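             /* e.g. at 10G: r_param = 10000/8 = 1250 bytes/usec, giving a
                threshold of 100 * 1250 * 5/4 = 156250 bytes */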
2321
2322         /* resolution of fairness timer */
2323         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2324         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2325         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2326
2327         /* this is the threshold below which we won't arm the timer anymore */
2328         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2329
2330         /* we multiply by 1e3/8 to get bytes/msec.
2331            We don't want the credits to exceed
2332            t_fair*FAIR_MEM (the algorithm resolution) */
2333         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2334         /* since each tick is 4 usec */
2335         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2336 }
2337
2338 /* Calculates the sum of vn_min_rates.
2339    It's needed for further normalizing of the min_rates.
2340    Returns:
2341      sum of vn_min_rates.
2342        or
2343      0 - if all the min_rates are 0.
2344      In the latter case the fairness algorithm should be deactivated.
2345      If not all min_rates are zero then those that are zeroes will be set to 1.
2346  */
2347 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2348 {
2349         int all_zero = 1;
2350         int port = BP_PORT(bp);
2351         int vn;
2352
2353         bp->vn_weight_sum = 0;
2354         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2355                 int func = 2*vn + port;
2356                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2357                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2358                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2359
2360                 /* Skip hidden vns */
2361                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2362                         continue;
2363
2364                 /* If min rate is zero - set it to 1 */
2365                 if (!vn_min_rate)
2366                         vn_min_rate = DEF_MIN_RATE;
2367                 else
2368                         all_zero = 0;
2369
2370                 bp->vn_weight_sum += vn_min_rate;
2371         }
2372
2373         /* ... only if all min rates are zeros - disable fairness */
2374         if (all_zero) {
2375                 bp->cmng.flags.cmng_enables &=
2376                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2377                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2378                    "  fairness will be disabled\n");
2379         } else
2380                 bp->cmng.flags.cmng_enables |=
2381                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2382 }
2383
2384 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2385 {
2386         struct rate_shaping_vars_per_vn m_rs_vn;
2387         struct fairness_vars_per_vn m_fair_vn;
2388         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2389         u16 vn_min_rate, vn_max_rate;
2390         int i;
2391
2392         /* If function is hidden - set min and max to zeroes */
2393         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2394                 vn_min_rate = 0;
2395                 vn_max_rate = 0;
2396
2397         } else {
2398                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2399                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2400                 /* If min rate is zero - set it to 1 */
2401                 if (!vn_min_rate)
2402                         vn_min_rate = DEF_MIN_RATE;
2403                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2404                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2405         }
2406         DP(NETIF_MSG_IFUP,
2407            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2408            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2409
2410         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2411         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2412
2413         /* global vn counter - maximal Mbps for this vn */
2414         m_rs_vn.vn_counter.rate = vn_max_rate;
2415
2416         /* quota - number of bytes that may be transmitted in this period */
2417         m_rs_vn.vn_counter.quota =
2418                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
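             /* e.g. a vn capped at 10000 Mbps with a 100 usec period may send
                10000 * 100 / 8 = 125000 bytes per period */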
2419
2420         if (bp->vn_weight_sum) {
2421                 /* credit for each period of the fairness algorithm:
2422                    number of bytes in T_FAIR (the vns share the port rate).
2423                    vn_weight_sum should not be larger than 10000, thus
2424                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2425                    than zero */
2426                 m_fair_vn.vn_credit_delta =
2427                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2428                                                  (8 * bp->vn_weight_sum))),
2429                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2430                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2431                    m_fair_vn.vn_credit_delta);
2432         }
2433
2434         /* Store it to internal memory */
2435         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2436                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2437                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2438                        ((u32 *)(&m_rs_vn))[i]);
2439
2440         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2441                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2442                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2443                        ((u32 *)(&m_fair_vn))[i]);
2444 }
2445
2446
2447 /* This function is called upon link interrupt */
2448 static void bnx2x_link_attn(struct bnx2x *bp)
2449 {
2450         /* Make sure that we are synced with the current statistics */
2451         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2452
2453         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2454
2455         if (bp->link_vars.link_up) {
2456
2457                 /* dropless flow control */
2458                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2459                         int port = BP_PORT(bp);
2460                         u32 pause_enabled = 0;
2461
2462                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2463                                 pause_enabled = 1;
2464
2465                         REG_WR(bp, BAR_USTRORM_INTMEM +
2466                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2467                                pause_enabled);
2468                 }
2469
2470                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2471                         struct host_port_stats *pstats;
2472
2473                         pstats = bnx2x_sp(bp, port_stats);
2474                         /* reset old bmac stats */
2475                         memset(&(pstats->mac_stx[0]), 0,
2476                                sizeof(struct mac_stx));
2477                 }
2478                 if (bp->state == BNX2X_STATE_OPEN)
2479                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2480         }
2481
2482         /* indicate link status */
2483         bnx2x_link_report(bp);
2484
2485         if (IS_E1HMF(bp)) {
2486                 int port = BP_PORT(bp);
2487                 int func;
2488                 int vn;
2489
2490                 /* Set the attention towards other drivers on the same port */
2491                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2492                         if (vn == BP_E1HVN(bp))
2493                                 continue;
2494
2495                         func = ((vn << 1) | port);
2496                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2497                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2498                 }
2499
2500                 if (bp->link_vars.link_up) {
2501                         int i;
2502
2503                         /* Init rate shaping and fairness contexts */
2504                         bnx2x_init_port_minmax(bp);
2505
2506                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2507                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2508
2509                         /* Store it to internal memory */
2510                         for (i = 0;
2511                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2512                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2513                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2514                                        ((u32 *)(&bp->cmng))[i]);
2515                 }
2516         }
2517 }
2518
2519 static void bnx2x__link_status_update(struct bnx2x *bp)
2520 {
2521         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2522                 return;
2523
2524         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2525
2526         if (bp->link_vars.link_up)
2527                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2528         else
2529                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2530
2531         bnx2x_calc_vn_weight_sum(bp);
2532
2533         /* indicate link status */
2534         bnx2x_link_report(bp);
2535 }
2536
2537 static void bnx2x_pmf_update(struct bnx2x *bp)
2538 {
2539         int port = BP_PORT(bp);
2540         u32 val;
2541
2542         bp->port.pmf = 1;
2543         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2544
2545         /* enable nig attention */
2546         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2547         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2548         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2549
2550         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2551 }
2552
2553 /* end of Link */
2554
2555 /* slow path */
2556
2557 /*
2558  * General service functions
2559  */
2560
2561 /* send the MCP a request, block until there is a reply */
2562 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2563 {
2564         int func = BP_FUNC(bp);
2565         u32 seq = ++bp->fw_seq;
2566         u32 rc = 0;
2567         u32 cnt = 1;
2568         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2569
2570         mutex_lock(&bp->fw_mb_mutex);
2571         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2572         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2573
2574         do {
2575                 /* let the FW do its magic ... */
2576                 msleep(delay);
2577
2578                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2579
2580                 /* Give the FW up to 5 seconds (500*10ms) */
2581         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2582
2583         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2584            cnt*delay, rc, seq);
2585
2586         /* is this a reply to our command? */
2587         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2588                 rc &= FW_MSG_CODE_MASK;
2589         else {
2590                 /* FW BUG! */
2591                 BNX2X_ERR("FW failed to respond!\n");
2592                 bnx2x_fw_dump(bp);
2593                 rc = 0;
2594         }
2595         mutex_unlock(&bp->fw_mb_mutex);
2596
2597         return rc;
2598 }
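/*
 * Callers compare the masked return value against the FW_MSG_CODE_*
 * constants; a return of 0 means the MCP never answered.  See
 * bnx2x_dcc_event() below for a typical invocation.
 */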
2599
2600 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2601 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2602 static void bnx2x_set_rx_mode(struct net_device *dev);
2603
2604 static void bnx2x_e1h_disable(struct bnx2x *bp)
2605 {
2606         int port = BP_PORT(bp);
2607
2608         netif_tx_disable(bp->dev);
2609
2610         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2611
2612         netif_carrier_off(bp->dev);
2613 }
2614
2615 static void bnx2x_e1h_enable(struct bnx2x *bp)
2616 {
2617         int port = BP_PORT(bp);
2618
2619         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2620
2621         /* Only the Tx queues need to be re-enabled */
2622         netif_tx_wake_all_queues(bp->dev);
2623
2624         /*
2625          * Do not call netif_carrier_on() here; it will be called when the
2626          * link state is checked and the link is up
2627          */
2628 }
2629
2630 static void bnx2x_update_min_max(struct bnx2x *bp)
2631 {
2632         int port = BP_PORT(bp);
2633         int vn, i;
2634
2635         /* Init rate shaping and fairness contexts */
2636         bnx2x_init_port_minmax(bp);
2637
2638         bnx2x_calc_vn_weight_sum(bp);
2639
2640         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2641                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2642
2643         if (bp->port.pmf) {
2644                 int func;
2645
2646                 /* Set the attention towards other drivers on the same port */
2647                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2648                         if (vn == BP_E1HVN(bp))
2649                                 continue;
2650
2651                         func = ((vn << 1) | port);
2652                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2653                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2654                 }
2655
2656                 /* Store it to internal memory */
2657                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2658                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2659                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2660                                ((u32 *)(&bp->cmng))[i]);
2661         }
2662 }
2663
2664 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2665 {
2666         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2667
2668         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2669
2670                 /*
2671                  * This is the only place besides the function initialization
2672                  * where the bp->flags can change so it is done without any
2673                  * locks
2674                  */
2675                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2676                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2677                         bp->flags |= MF_FUNC_DIS;
2678
2679                         bnx2x_e1h_disable(bp);
2680                 } else {
2681                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2682                         bp->flags &= ~MF_FUNC_DIS;
2683
2684                         bnx2x_e1h_enable(bp);
2685                 }
2686                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2687         }
2688         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2689
2690                 bnx2x_update_min_max(bp);
2691                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2692         }
2693
2694         /* Report results to MCP */
2695         if (dcc_event)
2696                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2697         else
2698                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2699 }
2700
2701 /* must be called under the spq lock */
2702 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2703 {
2704         struct eth_spe *next_spe = bp->spq_prod_bd;
2705
2706         if (bp->spq_prod_bd == bp->spq_last_bd) {
2707                 bp->spq_prod_bd = bp->spq;
2708                 bp->spq_prod_idx = 0;
2709                 DP(NETIF_MSG_TIMER, "end of spq\n");
2710         } else {
2711                 bp->spq_prod_bd++;
2712                 bp->spq_prod_idx++;
2713         }
2714         return next_spe;
2715 }
2716
2717 /* must be called under the spq lock */
2718 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2719 {
2720         int func = BP_FUNC(bp);
2721
2722         /* Make sure that BD data is updated before writing the producer */
2723         wmb();
2724
2725         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2726                bp->spq_prod_idx);
2727         mmiowb();
2728 }
2729
2730 /* the slow path queue is odd since completions arrive on the fastpath ring */
2731 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2732                          u32 data_hi, u32 data_lo, int common)
2733 {
2734         struct eth_spe *spe;
2735
2736         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2737            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2738            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2739            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2740            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2741
2742 #ifdef BNX2X_STOP_ON_ERROR
2743         if (unlikely(bp->panic))
2744                 return -EIO;
2745 #endif
2746
2747         spin_lock_bh(&bp->spq_lock);
2748
2749         if (!bp->spq_left) {
2750                 BNX2X_ERR("BUG! SPQ ring full!\n");
2751                 spin_unlock_bh(&bp->spq_lock);
2752                 bnx2x_panic();
2753                 return -EBUSY;
2754         }
2755
2756         spe = bnx2x_sp_get_next(bp);
2757
2758         /* CID needs the port number to be encoded into it */
2759         spe->hdr.conn_and_cmd_data =
2760                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2761                                      HW_CID(bp, cid)));
2762         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2763         if (common)
2764                 spe->hdr.type |=
2765                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2766
2767         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2768         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2769
2770         bp->spq_left--;
2771
2772         bnx2x_sp_prod_update(bp);
2773         spin_unlock_bh(&bp->spq_lock);
2774         return 0;
2775 }
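/*
 * A typical ramrod posting looks roughly like the following (illustrative;
 * the real command IDs and CIDs are chosen by the callers elsewhere in
 * this file):
 *
 *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, cid, 0, 0, 1);
 */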
2776
2777 /* acquire split MCP access lock register */
2778 static int bnx2x_acquire_alr(struct bnx2x *bp)
2779 {
2780         u32 j, val;
2781         int rc = 0;
2782
2783         might_sleep();
2784         for (j = 0; j < 1000; j++) {
2785                 val = (1UL << 31);
2786                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2787                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2788                 if (val & (1L << 31))
2789                         break;
2790
2791                 msleep(5);
2792         }
2793         if (!(val & (1L << 31))) {
2794                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2795                 rc = -EBUSY;
2796         }
2797
2798         return rc;
2799 }
2800
2801 /* release split MCP access lock register */
2802 static void bnx2x_release_alr(struct bnx2x *bp)
2803 {
2804         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2805 }
2806
2807 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2808 {
2809         struct host_def_status_block *def_sb = bp->def_status_blk;
2810         u16 rc = 0;
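         /* rc is a bitmask of which indices changed:
            1 = attn, 2 = CSTORM, 4 = USTORM, 8 = XSTORM, 16 = TSTORM */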
2811
2812         barrier(); /* status block is written to by the chip */
2813         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2814                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2815                 rc |= 1;
2816         }
2817         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2818                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2819                 rc |= 2;
2820         }
2821         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2822                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2823                 rc |= 4;
2824         }
2825         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2826                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2827                 rc |= 8;
2828         }
2829         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2830                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2831                 rc |= 16;
2832         }
2833         return rc;
2834 }
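/* The returned mask encodes which default status block indices changed:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM,
 * bit 4 - TSTORM.  bnx2x_sp_task() below only acts on bit 0
 * ("status & 0x1") to run the attention handler; the other bits just
 * refresh the indices used when acking the status blocks.
 */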
2835
2836 /*
2837  * slow path service functions
2838  */
2839
2840 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2841 {
2842         int port = BP_PORT(bp);
2843         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2844                        COMMAND_REG_ATTN_BITS_SET);
2845         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2846                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2847         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2848                                        NIG_REG_MASK_INTERRUPT_PORT0;
2849         u32 aeu_mask;
2850         u32 nig_mask = 0;
2851
2852         if (bp->attn_state & asserted)
2853                 BNX2X_ERR("IGU ERROR\n");
2854
2855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2856         aeu_mask = REG_RD(bp, aeu_addr);
2857
2858         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2859            aeu_mask, asserted);
2860         aeu_mask &= ~(asserted & 0x3ff);
2861         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2862
2863         REG_WR(bp, aeu_addr, aeu_mask);
2864         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2865
2866         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2867         bp->attn_state |= asserted;
2868         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2869
2870         if (asserted & ATTN_HARD_WIRED_MASK) {
2871                 if (asserted & ATTN_NIG_FOR_FUNC) {
2872
2873                         bnx2x_acquire_phy_lock(bp);
2874
2875                         /* save nig interrupt mask */
2876                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2877                         REG_WR(bp, nig_int_mask_addr, 0);
2878
2879                         bnx2x_link_attn(bp);
2880
2881                         /* handle unicore attn? */
2882                 }
2883                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2884                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2885
2886                 if (asserted & GPIO_2_FUNC)
2887                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2888
2889                 if (asserted & GPIO_3_FUNC)
2890                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2891
2892                 if (asserted & GPIO_4_FUNC)
2893                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2894
2895                 if (port == 0) {
2896                         if (asserted & ATTN_GENERAL_ATTN_1) {
2897                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2898                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2899                         }
2900                         if (asserted & ATTN_GENERAL_ATTN_2) {
2901                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2902                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2903                         }
2904                         if (asserted & ATTN_GENERAL_ATTN_3) {
2905                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2906                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2907                         }
2908                 } else {
2909                         if (asserted & ATTN_GENERAL_ATTN_4) {
2910                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2911                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2912                         }
2913                         if (asserted & ATTN_GENERAL_ATTN_5) {
2914                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2915                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2916                         }
2917                         if (asserted & ATTN_GENERAL_ATTN_6) {
2918                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2919                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2920                         }
2921                 }
2922
2923         } /* if hardwired */
2924
2925         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2926            asserted, hc_addr);
2927         REG_WR(bp, hc_addr, asserted);
2928
2929         /* now set back the mask */
2930         if (asserted & ATTN_NIG_FOR_FUNC) {
2931                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2932                 bnx2x_release_phy_lock(bp);
2933         }
2934 }
2935
2936 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2937 {
2938         int port = BP_PORT(bp);
2939
2940         /* mark the failure */
2941         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2942         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2943         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2944                  bp->link_params.ext_phy_config);
2945
2946         /* log the failure */
2947         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
2948                    "Please contact Dell Support for assistance.\n");
2949 }
2950
2951 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2952 {
2953         int port = BP_PORT(bp);
2954         int reg_offset;
2955         u32 val, swap_val, swap_override;
2956
2957         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2958                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2959
2960         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2961
2962                 val = REG_RD(bp, reg_offset);
2963                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2964                 REG_WR(bp, reg_offset, val);
2965
2966                 BNX2X_ERR("SPIO5 hw attention\n");
2967
2968                 /* Fan failure attention */
2969                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2970                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2971                         /* Low power mode is controlled by GPIO 2 */
2972                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2973                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2974                         /* The PHY reset is controlled by GPIO 1 */
2975                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2976                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2977                         break;
2978
2979                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2980                         /* The PHY reset is controlled by GPIO 1 */
2981                         /* fake the port number to cancel the swap done in
2982                            set_gpio() */
2983                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2984                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2985                         port = (swap_val && swap_override) ^ 1;
2986                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2987                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2988                         break;
2989
2990                 default:
2991                         break;
2992                 }
2993                 bnx2x_fan_failure(bp);
2994         }
2995
2996         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2997                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2998                 bnx2x_acquire_phy_lock(bp);
2999                 bnx2x_handle_module_detect_int(&bp->link_params);
3000                 bnx2x_release_phy_lock(bp);
3001         }
3002
3003         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3004
3005                 val = REG_RD(bp, reg_offset);
3006                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3007                 REG_WR(bp, reg_offset, val);
3008
3009                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3010                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3011                 bnx2x_panic();
3012         }
3013 }
3014
3015 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3016 {
3017         u32 val;
3018
3019         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3020
3021                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3022                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3023                 /* DORQ discard attention */
3024                 if (val & 0x2)
3025                         BNX2X_ERR("FATAL error from DORQ\n");
3026         }
3027
3028         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3029
3030                 int port = BP_PORT(bp);
3031                 int reg_offset;
3032
3033                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3034                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3035
3036                 val = REG_RD(bp, reg_offset);
3037                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3038                 REG_WR(bp, reg_offset, val);
3039
3040                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3041                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3042                 bnx2x_panic();
3043         }
3044 }
3045
3046 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3047 {
3048         u32 val;
3049
3050         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3051
3052                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3053                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3054                 /* CFC error attention */
3055                 if (val & 0x2)
3056                         BNX2X_ERR("FATAL error from CFC\n");
3057         }
3058
3059         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3060
3061                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3062                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3063                 /* RQ_USDMDP_FIFO_OVERFLOW */
3064                 if (val & 0x18000)
3065                         BNX2X_ERR("FATAL error from PXP\n");
3066         }
3067
3068         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3069
3070                 int port = BP_PORT(bp);
3071                 int reg_offset;
3072
3073                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3074                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3075
3076                 val = REG_RD(bp, reg_offset);
3077                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3078                 REG_WR(bp, reg_offset, val);
3079
3080                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3081                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3082                 bnx2x_panic();
3083         }
3084 }
3085
3086 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3087 {
3088         u32 val;
3089
3090         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3091
3092                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3093                         int func = BP_FUNC(bp);
3094
3095                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3096                         bp->mf_config = SHMEM_RD(bp,
3097                                            mf_cfg.func_mf_config[func].config);
3098                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3099                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3100                                 bnx2x_dcc_event(bp,
3101                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3102                         bnx2x__link_status_update(bp);
3103                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3104                                 bnx2x_pmf_update(bp);
3105
3106                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3107
3108                         BNX2X_ERR("MC assert!\n");
3109                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3110                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3111                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3112                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3113                         bnx2x_panic();
3114
3115                 } else if (attn & BNX2X_MCP_ASSERT) {
3116
3117                         BNX2X_ERR("MCP assert!\n");
3118                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3119                         bnx2x_fw_dump(bp);
3120
3121                 } else
3122                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3123         }
3124
3125         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3126                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3127                 if (attn & BNX2X_GRC_TIMEOUT) {
3128                         val = CHIP_IS_E1H(bp) ?
3129                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3130                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3131                 }
3132                 if (attn & BNX2X_GRC_RSV) {
3133                         val = CHIP_IS_E1H(bp) ?
3134                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3135                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3136                 }
3137                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3138         }
3139 }
3140
3141 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3142 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3143
3144
3145 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3146 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3147 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3148 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3149 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3150 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
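/* Layout of BNX2X_MISC_GEN_REG as used by the recovery flow below:
 * bits [15:0] hold the load counter (incremented by bnx2x_inc_load_cnt()
 * on load, decremented by bnx2x_dec_load_cnt() on unload) and bit 16
 * (RESET_DONE_FLAG_SHIFT) is set while a recovery reset is in progress.
 * E.g. a value of 0x00010002 means a reset is in flight while two
 * functions are still counted as loaded.
 */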
3151 /*
3152  * should be run under rtnl lock
3153  */
3154 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3155 {
3156         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3157         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3158         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3159         barrier();
3160         mmiowb();
3161 }
3162
3163 /*
3164  * should be run under rtnl lock
3165  */
3166 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3167 {
3168         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169         val |= (1 << RESET_DONE_FLAG_SHIFT);
3170         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171         barrier();
3172         mmiowb();
3173 }
3174
3175 /*
3176  * should be run under rtnl lock
3177  */
3178 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3179 {
3180         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3182         return !(val & RESET_DONE_FLAG_MASK);
3183 }
3184
3185 /*
3186  * should be run under rtnl lock
3187  */
3188 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3189 {
3190         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3191
3192         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3193
3194         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3195         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3196         barrier();
3197         mmiowb();
3198 }
3199
3200 /*
3201  * should be run under rtnl lock
3202  */
3203 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3204 {
3205         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3206
3207         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3208
3209         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3210         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3211         barrier();
3212         mmiowb();
3213
3214         return val1;
3215 }
3216
3217 /*
3218  * should be run under rtnl lock
3219  */
3220 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3221 {
3222         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3223 }
3224
3225 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3226 {
3227         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3228         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3229 }
3230
3231 static inline void _print_next_block(int idx, const char *blk)
3232 {
3233         if (idx)
3234                 pr_cont(", ");
3235         pr_cont("%s", blk);
3236 }
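/* Each parity dump below calls this with an incrementing index, so the
 * affected blocks print as one comma-separated list, e.g.
 * "BRB, PARSER, TSDM".
 */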
3237
3238 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3239 {
3240         int i = 0;
3241         u32 cur_bit = 0;
3242         for (i = 0; sig; i++) {
3243                 cur_bit = ((u32)0x1 << i);
3244                 if (sig & cur_bit) {
3245                         switch (cur_bit) {
3246                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3247                                 _print_next_block(par_num++, "BRB");
3248                                 break;
3249                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3250                                 _print_next_block(par_num++, "PARSER");
3251                                 break;
3252                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3253                                 _print_next_block(par_num++, "TSDM");
3254                                 break;
3255                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3256                                 _print_next_block(par_num++, "SEARCHER");
3257                                 break;
3258                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3259                                 _print_next_block(par_num++, "TSEMI");
3260                                 break;
3261                         }
3262
3263                         /* Clear the bit */
3264                         sig &= ~cur_bit;
3265                 }
3266         }
3267
3268         return par_num;
3269 }
3270
3271 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3272 {
3273         int i = 0;
3274         u32 cur_bit = 0;
3275         for (i = 0; sig; i++) {
3276                 cur_bit = ((u32)0x1 << i);
3277                 if (sig & cur_bit) {
3278                         switch (cur_bit) {
3279                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3280                                 _print_next_block(par_num++, "PBCLIENT");
3281                                 break;
3282                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3283                                 _print_next_block(par_num++, "QM");
3284                                 break;
3285                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3286                                 _print_next_block(par_num++, "XSDM");
3287                                 break;
3288                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3289                                 _print_next_block(par_num++, "XSEMI");
3290                                 break;
3291                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3292                                 _print_next_block(par_num++, "DOORBELLQ");
3293                                 break;
3294                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3295                                 _print_next_block(par_num++, "VAUX PCI CORE");
3296                                 break;
3297                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3298                                 _print_next_block(par_num++, "DEBUG");
3299                                 break;
3300                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3301                                 _print_next_block(par_num++, "USDM");
3302                                 break;
3303                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3304                                 _print_next_block(par_num++, "USEMI");
3305                                 break;
3306                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3307                                 _print_next_block(par_num++, "UPB");
3308                                 break;
3309                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3310                                 _print_next_block(par_num++, "CSDM");
3311                                 break;
3312                         }
3313
3314                         /* Clear the bit */
3315                         sig &= ~cur_bit;
3316                 }
3317         }
3318
3319         return par_num;
3320 }
3321
3322 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3323 {
3324         int i = 0;
3325         u32 cur_bit = 0;
3326         for (i = 0; sig; i++) {
3327                 cur_bit = ((u32)0x1 << i);
3328                 if (sig & cur_bit) {
3329                         switch (cur_bit) {
3330                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3331                                 _print_next_block(par_num++, "CSEMI");
3332                                 break;
3333                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3334                                 _print_next_block(par_num++, "PXP");
3335                                 break;
3336                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3337                                 _print_next_block(par_num++,
3338                                         "PXPPCICLOCKCLIENT");
3339                                 break;
3340                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3341                                 _print_next_block(par_num++, "CFC");
3342                                 break;
3343                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3344                                 _print_next_block(par_num++, "CDU");
3345                                 break;
3346                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3347                                 _print_next_block(par_num++, "IGU");
3348                                 break;
3349                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3350                                 _print_next_block(par_num++, "MISC");
3351                                 break;
3352                         }
3353
3354                         /* Clear the bit */
3355                         sig &= ~cur_bit;
3356                 }
3357         }
3358
3359         return par_num;
3360 }
3361
3362 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3363 {
3364         int i = 0;
3365         u32 cur_bit = 0;
3366         for (i = 0; sig; i++) {
3367                 cur_bit = ((u32)0x1 << i);
3368                 if (sig & cur_bit) {
3369                         switch (cur_bit) {
3370                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3371                                 _print_next_block(par_num++, "MCP ROM");
3372                                 break;
3373                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3374                                 _print_next_block(par_num++, "MCP UMP RX");
3375                                 break;
3376                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3377                                 _print_next_block(par_num++, "MCP UMP TX");
3378                                 break;
3379                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3380                                 _print_next_block(par_num++, "MCP SCPAD");
3381                                 break;
3382                         }
3383
3384                         /* Clear the bit */
3385                         sig &= ~cur_bit;
3386                 }
3387         }
3388
3389         return par_num;
3390 }
3391
3392 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3393                                      u32 sig2, u32 sig3)
3394 {
3395         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3396             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3397                 int par_num = 0;
3398                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3399                         "[0]:0x%08x [1]:0x%08x "
3400                         "[2]:0x%08x [3]:0x%08x\n",
3401                           sig0 & HW_PRTY_ASSERT_SET_0,
3402                           sig1 & HW_PRTY_ASSERT_SET_1,
3403                           sig2 & HW_PRTY_ASSERT_SET_2,
3404                           sig3 & HW_PRTY_ASSERT_SET_3);
3405                 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3406                        bp->dev->name);
3407                 par_num = bnx2x_print_blocks_with_parity0(
3408                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3409                 par_num = bnx2x_print_blocks_with_parity1(
3410                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3411                 par_num = bnx2x_print_blocks_with_parity2(
3412                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3413                 par_num = bnx2x_print_blocks_with_parity3(
3414                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3415                 pr_cont("\n");
3416                 return true;
3417         } else
3418                 return false;
3419 }
3420
3421 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3422 {
3423         struct attn_route attn;
3424         int port = BP_PORT(bp);
3425
3426         attn.sig[0] = REG_RD(bp,
3427                              MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3428                              port*4);
3429         attn.sig[1] = REG_RD(bp,
3430                              MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3431                              port*4);
3432         attn.sig[2] = REG_RD(bp,
3433                              MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3434                              port*4);
3435         attn.sig[3] = REG_RD(bp,
3436                              MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3437                              port*4);
3438
3439         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3440                                         attn.sig[3]);
3441 }
3442
3443 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3444 {
3445         struct attn_route attn, *group_mask;
3446         int port = BP_PORT(bp);
3447         int index;
3448         u32 reg_addr;
3449         u32 val;
3450         u32 aeu_mask;
3451
3452         /* need to take the HW lock because the MCP or the other port
3453            might also try to handle this event */
3454         bnx2x_acquire_alr(bp);
3455
3456         if (bnx2x_chk_parity_attn(bp)) {
3457                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3458                 bnx2x_set_reset_in_progress(bp);
3459                 schedule_delayed_work(&bp->reset_task, 0);
3460                 /* Disable HW interrupts */
3461                 bnx2x_int_disable(bp);
3462                 bnx2x_release_alr(bp);
3463                 /* In case of parity errors, don't handle attentions so
3464                  * that the other function can also "see" them.
3465                  */
3466                 return;
3467         }
3468
3469         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3470         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3471         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3472         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3473         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3474            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3475
3476         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3477                 if (deasserted & (1 << index)) {
3478                         group_mask = &bp->attn_group[index];
3479
3480                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3481                            index, group_mask->sig[0], group_mask->sig[1],
3482                            group_mask->sig[2], group_mask->sig[3]);
3483
3484                         bnx2x_attn_int_deasserted3(bp,
3485                                         attn.sig[3] & group_mask->sig[3]);
3486                         bnx2x_attn_int_deasserted1(bp,
3487                                         attn.sig[1] & group_mask->sig[1]);
3488                         bnx2x_attn_int_deasserted2(bp,
3489                                         attn.sig[2] & group_mask->sig[2]);
3490                         bnx2x_attn_int_deasserted0(bp,
3491                                         attn.sig[0] & group_mask->sig[0]);
3492                 }
3493         }
3494
3495         bnx2x_release_alr(bp);
3496
3497         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3498
3499         val = ~deasserted;
3500         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3501            val, reg_addr);
3502         REG_WR(bp, reg_addr, val);
3503
3504         if (~bp->attn_state & deasserted)
3505                 BNX2X_ERR("IGU ERROR\n");
3506
3507         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3508                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3509
3510         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3511         aeu_mask = REG_RD(bp, reg_addr);
3512
3513         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3514            aeu_mask, deasserted);
3515         aeu_mask |= (deasserted & 0x3ff);
3516         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3517
3518         REG_WR(bp, reg_addr, aeu_mask);
3519         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3520
3521         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3522         bp->attn_state &= ~deasserted;
3523         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3524 }
3525
3526 static void bnx2x_attn_int(struct bnx2x *bp)
3527 {
3528         /* read local copy of bits */
3529         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3530                                                                 attn_bits);
3531         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3532                                                                 attn_bits_ack);
3533         u32 attn_state = bp->attn_state;
3534
3535         /* look for changed bits */
3536         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3537         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
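        /* e.g. a bit newly raised by the hardware shows up in attn_bits
         * before it is acked or tracked, so it lands in 'asserted'; once
         * the hardware drops it while it is still acked and tracked, it
         * lands in 'deasserted'
         */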
3538
3539         DP(NETIF_MSG_HW,
3540            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3541            attn_bits, attn_ack, asserted, deasserted);
3542
3543         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3544                 BNX2X_ERR("BAD attention state\n");
3545
3546         /* handle bits that were raised */
3547         if (asserted)
3548                 bnx2x_attn_int_asserted(bp, asserted);
3549
3550         if (deasserted)
3551                 bnx2x_attn_int_deasserted(bp, deasserted);
3552 }
3553
3554 static void bnx2x_sp_task(struct work_struct *work)
3555 {
3556         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3557         u16 status;
3558
3559
3560         /* Return here if interrupt is disabled */
3561         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3562                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3563                 return;
3564         }
3565
3566         status = bnx2x_update_dsb_idx(bp);
3567 /*      if (status == 0)                                     */
3568 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3569
3570         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3571
3572         /* HW attentions */
3573         if (status & 0x1)
3574                 bnx2x_attn_int(bp);
3575
3576         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3577                      IGU_INT_NOP, 1);
3578         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3579                      IGU_INT_NOP, 1);
3580         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3581                      IGU_INT_NOP, 1);
3582         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3583                      IGU_INT_NOP, 1);
3584         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3585                      IGU_INT_ENABLE, 1);
3586
3587 }
3588
3589 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3590 {
3591         struct net_device *dev = dev_instance;
3592         struct bnx2x *bp = netdev_priv(dev);
3593
3594         /* Return here if interrupt is disabled */
3595         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3596                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3597                 return IRQ_HANDLED;
3598         }
3599
3600         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3601
3602 #ifdef BNX2X_STOP_ON_ERROR
3603         if (unlikely(bp->panic))
3604                 return IRQ_HANDLED;
3605 #endif
3606
3607 #ifdef BCM_CNIC
3608         {
3609                 struct cnic_ops *c_ops;
3610
3611                 rcu_read_lock();
3612                 c_ops = rcu_dereference(bp->cnic_ops);
3613                 if (c_ops)
3614                         c_ops->cnic_handler(bp->cnic_data, NULL);
3615                 rcu_read_unlock();
3616         }
3617 #endif
3618         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3619
3620         return IRQ_HANDLED;
3621 }
3622
3623 /* end of slow path */
3624
3625 /* Statistics */
3626
3627 /****************************************************************************
3628 * Macros
3629 ****************************************************************************/
3630
3631 /* sum[hi:lo] += add[hi:lo] */
3632 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3633         do { \
3634                 s_lo += a_lo; \
3635                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3636         } while (0)
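/* Worked example: with s = 0x00000001_ffffffff and a = 0x00000000_00000001,
 * s_lo wraps to 0 ("s_lo < a_lo" detects the carry) and s_hi becomes 2,
 * giving the expected 0x00000002_00000000.
 */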
3637
3638 /* difference = minuend - subtrahend */
3639 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3640         do { \
3641                 if (m_lo < s_lo) { \
3642                         /* underflow */ \
3643                         d_hi = m_hi - s_hi; \
3644                         if (d_hi > 0) { \
3645                                 /* borrow 1 from the high word */ \
3646                                 d_hi--; \
3647                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3648                         } else { \
3649                                 /* m_hi <= s_hi */ \
3650                                 d_hi = 0; \
3651                                 d_lo = 0; \
3652                         } \
3653                 } else { \
3654                         /* m_lo >= s_lo */ \
3655                         if (m_hi < s_hi) { \
3656                                 d_hi = 0; \
3657                                 d_lo = 0; \
3658                         } else { \
3659                                 /* m_hi >= s_hi */ \
3660                                 d_hi = m_hi - s_hi; \
3661                                 d_lo = m_lo - s_lo; \
3662                         } \
3663                 } \
3664         } while (0)
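/* Worked example: m = 0x00000002_00000000 minus s = 0x00000001_00000001
 * takes the underflow branch: d_hi = 1 is reduced to 0 by the borrow and
 * d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff, i.e. the correct result
 * 0x00000000_ffffffff.  A result that would go negative clamps to 0.
 */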
3665
3666 #define UPDATE_STAT64(s, t) \
3667         do { \
3668                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3669                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3670                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3671                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3672                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3673                        pstats->mac_stx[1].t##_lo, diff.lo); \
3674         } while (0)
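/* mac_stx[0] keeps the last raw MAC snapshot while mac_stx[1] holds the
 * running total: the 64-bit delta of each new reading against mac_stx[0]
 * is accumulated into mac_stx[1].  Since DIFF_64 clamps a negative delta
 * to zero, a reading that goes backwards contributes nothing rather than
 * a huge bogus jump.
 */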
3675
3676 #define UPDATE_STAT64_NIG(s, t) \
3677         do { \
3678                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3679                         diff.lo, new->s##_lo, old->s##_lo); \
3680                 ADD_64(estats->t##_hi, diff.hi, \
3681                        estats->t##_lo, diff.lo); \
3682         } while (0)
3683
3684 /* sum[hi:lo] += add */
3685 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3686         do { \
3687                 s_lo += a; \
3688                 s_hi += (s_lo < a) ? 1 : 0; \
3689         } while (0)
3690
3691 #define UPDATE_EXTEND_STAT(s) \
3692         do { \
3693                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3694                               pstats->mac_stx[1].s##_lo, \
3695                               new->s); \
3696         } while (0)
3697
3698 #define UPDATE_EXTEND_TSTAT(s, t) \
3699         do { \
3700                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3701                 old_tclient->s = tclient->s; \
3702                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3703         } while (0)
3704
3705 #define UPDATE_EXTEND_USTAT(s, t) \
3706         do { \
3707                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3708                 old_uclient->s = uclient->s; \
3709                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3710         } while (0)
3711
3712 #define UPDATE_EXTEND_XSTAT(s, t) \
3713         do { \
3714                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3715                 old_xclient->s = xclient->s; \
3716                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3717         } while (0)
3718
3719 /* minuend -= subtrahend */
3720 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3721         do { \
3722                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3723         } while (0)
3724
3725 /* minuend[hi:lo] -= subtrahend */
3726 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3727         do { \
3728                 SUB_64(m_hi, 0, m_lo, s); \
3729         } while (0)
3730
3731 #define SUB_EXTEND_USTAT(s, t) \
3732         do { \
3733                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3734                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3735         } while (0)
3736
3737 /*
3738  * General service functions
3739  */
3740
3741 static inline long bnx2x_hilo(u32 *hiref)
3742 {
3743         u32 lo = *(hiref + 1);
3744 #if (BITS_PER_LONG == 64)
3745         u32 hi = *hiref;
3746
3747         return HILO_U64(hi, lo);
3748 #else
3749         return lo;
3750 #endif
3751 }
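/* On 64-bit builds this folds the {hi, lo} register pair into a single
 * value via HILO_U64(); on 32-bit builds only the low 32 bits are
 * returned, since a long cannot hold more.
 */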
3752
3753 /*
3754  * Init service functions
3755  */
3756
3757 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3758 {
3759         if (!bp->stats_pending) {
3760                 struct eth_query_ramrod_data ramrod_data = {0};
3761                 int i, rc;
3762
3763                 ramrod_data.drv_counter = bp->stats_counter++;
3764                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3765                 for_each_queue(bp, i)
3766                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3767
3768                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3769                                    ((u32 *)&ramrod_data)[1],
3770                                    ((u32 *)&ramrod_data)[0], 0);
3771                 if (rc == 0) {
3772                         /* stats ramrod has its own slot on the spq */
3773                         bp->spq_left++;
3774                         bp->stats_pending = 1;
3775                 }
3776         }
3777 }
3778
3779 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3780 {
3781         struct dmae_command *dmae = &bp->stats_dmae;
3782         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3783
3784         *stats_comp = DMAE_COMP_VAL;
3785         if (CHIP_REV_IS_SLOW(bp))
3786                 return;
3787
3788         /* loader */
3789         if (bp->executer_idx) {
3790                 int loader_idx = PMF_DMAE_C(bp);
3791
3792                 memset(dmae, 0, sizeof(struct dmae_command));
3793
3794                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3795                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3796                                 DMAE_CMD_DST_RESET |
3797 #ifdef __BIG_ENDIAN
3798                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3799 #else
3800                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3801 #endif
3802                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3803                                                DMAE_CMD_PORT_0) |
3804                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3805                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3806                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3807                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3808                                      sizeof(struct dmae_command) *
3809                                      (loader_idx + 1)) >> 2;
3810                 dmae->dst_addr_hi = 0;
3811                 dmae->len = sizeof(struct dmae_command) >> 2;
3812                 if (CHIP_IS_E1(bp))
3813                         dmae->len--;
3814                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3815                 dmae->comp_addr_hi = 0;
3816                 dmae->comp_val = 1;
3817
3818                 *stats_comp = 0;
3819                 bnx2x_post_dmae(bp, dmae, loader_idx);
3820
3821         } else if (bp->func_stx) {
3822                 *stats_comp = 0;
3823                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3824         }
3825 }
3826
3827 static int bnx2x_stats_comp(struct bnx2x *bp)
3828 {
3829         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3830         int cnt = 10;
3831
3832         might_sleep();
3833         while (*stats_comp != DMAE_COMP_VAL) {
3834                 if (!cnt) {
3835                         BNX2X_ERR("timeout waiting for stats to finish\n");
3836                         break;
3837                 }
3838                 cnt--;
3839                 msleep(1);
3840         }
3841         return 1;
3842 }
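/* Note: the poll above waits at most ~10 iterations of msleep(1) for the
 * DMAE completion value, and the function returns 1 whether or not the
 * completion actually arrived - callers rely on the error log rather
 * than the return value.
 */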
3843
3844 /*
3845  * Statistics service functions
3846  */
3847
3848 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3849 {
3850         struct dmae_command *dmae;
3851         u32 opcode;
3852         int loader_idx = PMF_DMAE_C(bp);
3853         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3854
3855         /* sanity */
3856         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3857                 BNX2X_ERR("BUG!\n");
3858                 return;
3859         }
3860
3861         bp->executer_idx = 0;
3862
3863         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3864                   DMAE_CMD_C_ENABLE |
3865                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3866 #ifdef __BIG_ENDIAN
3867                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3868 #else
3869                   DMAE_CMD_ENDIANITY_DW_SWAP |
3870 #endif
3871                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3872                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3873
3874         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3875         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3876         dmae->src_addr_lo = bp->port.port_stx >> 2;
3877         dmae->src_addr_hi = 0;
3878         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3879         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3880         dmae->len = DMAE_LEN32_RD_MAX;
3881         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3882         dmae->comp_addr_hi = 0;
3883         dmae->comp_val = 1;
3884
3885         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3886         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3887         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3888         dmae->src_addr_hi = 0;
3889         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3890                                    DMAE_LEN32_RD_MAX * 4);
3891         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3892                                    DMAE_LEN32_RD_MAX * 4);
3893         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3894         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3895         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3896         dmae->comp_val = DMAE_COMP_VAL;
3897
3898         *stats_comp = 0;
3899         bnx2x_hw_stats_post(bp);
3900         bnx2x_stats_comp(bp);
3901 }
3902
3903 static void bnx2x_port_stats_init(struct bnx2x *bp)
3904 {
3905         struct dmae_command *dmae;
3906         int port = BP_PORT(bp);
3907         int vn = BP_E1HVN(bp);
3908         u32 opcode;
3909         int loader_idx = PMF_DMAE_C(bp);
3910         u32 mac_addr;
3911         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3912
3913         /* sanity */
3914         if (!bp->link_vars.link_up || !bp->port.pmf) {
3915                 BNX2X_ERR("BUG!\n");
3916                 return;
3917         }
3918
3919         bp->executer_idx = 0;
3920
3921         /* MCP */
3922         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3923                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3924                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3925 #ifdef __BIG_ENDIAN
3926                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3927 #else
3928                   DMAE_CMD_ENDIANITY_DW_SWAP |
3929 #endif
3930                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3931                   (vn << DMAE_CMD_E1HVN_SHIFT));
3932
3933         if (bp->port.port_stx) {
3934
3935                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3936                 dmae->opcode = opcode;
3937                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3938                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3939                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3940                 dmae->dst_addr_hi = 0;
3941                 dmae->len = sizeof(struct host_port_stats) >> 2;
3942                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3943                 dmae->comp_addr_hi = 0;
3944                 dmae->comp_val = 1;
3945         }
3946
3947         if (bp->func_stx) {
3948
3949                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3950                 dmae->opcode = opcode;
3951                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3952                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3953                 dmae->dst_addr_lo = bp->func_stx >> 2;
3954                 dmae->dst_addr_hi = 0;
3955                 dmae->len = sizeof(struct host_func_stats) >> 2;
3956                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3957                 dmae->comp_addr_hi = 0;
3958                 dmae->comp_val = 1;
3959         }
3960
3961         /* MAC */
3962         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3963                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3964                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3965 #ifdef __BIG_ENDIAN
3966                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3967 #else
3968                   DMAE_CMD_ENDIANITY_DW_SWAP |
3969 #endif
3970                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3971                   (vn << DMAE_CMD_E1HVN_SHIFT));
3972
3973         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3974
3975                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3976                                    NIG_REG_INGRESS_BMAC0_MEM);
3977
3978                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3979                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3980                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3981                 dmae->opcode = opcode;
3982                 dmae->src_addr_lo = (mac_addr +
3983                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3984                 dmae->src_addr_hi = 0;
3985                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3986                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3987                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3988                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3989                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3990                 dmae->comp_addr_hi = 0;
3991                 dmae->comp_val = 1;
3992
3993                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3994                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3995                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3996                 dmae->opcode = opcode;
3997                 dmae->src_addr_lo = (mac_addr +
3998                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3999                 dmae->src_addr_hi = 0;
4000                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4001                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4002                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4003                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4004                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4005                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4006                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4007                 dmae->comp_addr_hi = 0;
4008                 dmae->comp_val = 1;
4009
4010         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4011
4012                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4013
4014                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4015                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4016                 dmae->opcode = opcode;
4017                 dmae->src_addr_lo = (mac_addr +
4018                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4019                 dmae->src_addr_hi = 0;
4020                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4021                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4022                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4023                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4024                 dmae->comp_addr_hi = 0;
4025                 dmae->comp_val = 1;
4026
4027                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4028                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4029                 dmae->opcode = opcode;
4030                 dmae->src_addr_lo = (mac_addr +
4031                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4032                 dmae->src_addr_hi = 0;
4033                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4034                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4035                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4036                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4037                 dmae->len = 1;
4038                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4039                 dmae->comp_addr_hi = 0;
4040                 dmae->comp_val = 1;
4041
4042                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4043                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4044                 dmae->opcode = opcode;
4045                 dmae->src_addr_lo = (mac_addr +
4046                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4047                 dmae->src_addr_hi = 0;
4048                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4049                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4050                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4051                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4052                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4053                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4054                 dmae->comp_addr_hi = 0;
4055                 dmae->comp_val = 1;
4056         }
4057
4058         /* NIG */
4059         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4060         dmae->opcode = opcode;
4061         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4062                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4063         dmae->src_addr_hi = 0;
4064         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4065         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4066         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4067         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4068         dmae->comp_addr_hi = 0;
4069         dmae->comp_val = 1;
4070
4071         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4072         dmae->opcode = opcode;
4073         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4074                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4075         dmae->src_addr_hi = 0;
4076         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4077                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4078         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4079                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4080         dmae->len = (2*sizeof(u32)) >> 2;
4081         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4082         dmae->comp_addr_hi = 0;
4083         dmae->comp_val = 1;
4084
4085         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4086         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4087                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4088                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4089 #ifdef __BIG_ENDIAN
4090                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4091 #else
4092                         DMAE_CMD_ENDIANITY_DW_SWAP |
4093 #endif
4094                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4095                         (vn << DMAE_CMD_E1HVN_SHIFT));
4096         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4097                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4098         dmae->src_addr_hi = 0;
4099         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4100                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4101         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4102                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4103         dmae->len = (2*sizeof(u32)) >> 2;
4104         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4105         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4106         dmae->comp_val = DMAE_COMP_VAL;
4107
4108         *stats_comp = 0;
4109 }
4110
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

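/*
 * Only the PMF (port management function) collects the port-wide MAC/NIG
 * statistics; every function, PMF or not, may still DMA its per-function
 * block to the address the MCP handed us in func_stx.  The start/restart
 * helpers below pick the appropriate init routine accordingly.
 */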
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

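/*
 * UPDATE_STAT64(s, t), as used below, folds the BMAC hardware counter s
 * (read as _hi/_lo 32-bit halves) into the 64-bit accumulator t in the
 * port stats.  Roughly: the delta against the previously latched raw
 * value is computed into the local 'diff' scratch struct, the new raw
 * value is latched, and the delta is added to the running total in
 * mac_stx[1] with a carry-propagating 64-bit add.
 */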
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

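/*
 * ADD_64(s_hi, a_hi, s_lo, a_lo), used below and throughout the stats
 * code, is a 64-bit add over split hi/lo u32 pairs; in effect:
 *
 *	s_lo += a_lo;
 *	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);	carry out of the low word
 *
 * so sums keep full 64-bit precision even though the firmware and the
 * hardware report the halves separately.
 */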
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

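/*
 * ADD_EXTEND_64(s_hi, s_lo, a), used below, extends a 32-bit delta 'a'
 * into the 64-bit hi/lo accumulator, again with explicit carry handling;
 * this is how the 32-bit NIG counters survive wraparound between polls.
 */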
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

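/*
 * The storm (firmware) statistics are only considered valid once each
 * storm's per-client stats_counter has advanced to match the counter the
 * driver stamped into the last statistics ramrod; a stale counter means
 * the firmware has not refreshed that block yet, and the update is
 * retried on the next tick (see bnx2x_stats_update()).
 */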
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

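/*
 * bnx2x_hilo() (defined earlier in this file) collapses a hi/lo pair into
 * a single value for the netdev counters below.  On 64-bit kernels it
 * folds the pair into (hi << 32) | lo; on 32-bit kernels, where unsigned
 * long cannot hold that, only the low 32 bits are returned.
 */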
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats not updated for 3 consecutive polls\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

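/*
 * Statistics state machine: indexed by the current state and the incoming
 * event, each cell names the action to run and the state to move to.  All
 * transitions are driven through bnx2x_stats_handle() below.
 */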
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* make sure the new state is visible to other CPUs */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
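
/*
 * Typical usage: the periodic timer drives the machine with
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * (see bnx2x_timer() below), while link changes, PMF migration and unload
 * feed in the LINK_UP, PMF and STOP events from elsewhere in the driver.
 */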

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

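/*
 * Driver/MCP heartbeat, maintained by bnx2x_timer() below: each tick the
 * driver advances its pulse sequence (masked by DRV_PULSE_SEQ_MASK) and
 * writes it to the shared-memory mailbox, while the MCP echoes back the
 * last value it saw.  Worked example: after the driver writes 0x13, a
 * read-back of 0x13 (MCP caught up) or 0x12 (MCP one behind) is fine;
 * any larger gap means one side missed a beat and gets logged.
 */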
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

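/*
 * A host status block lives in host memory and is updated by the chip;
 * bnx2x_init_sb() points the CSTORM at its DMA address and starts every
 * index off as disabled (HC_DISABLE set to 1), leaving it to
 * bnx2x_update_coalesce() further down to enable the indices that are
 * actually used.
 */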
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

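/*
 * Interrupt coalescing: the status-block timeout registers are written as
 * ticks/(4 * BNX2X_BTR), i.e. in units of four base-timer-resolution
 * intervals (bp->rx_ticks and bp->tx_ticks presumably carry the ethtool
 * coalescing settings in microseconds).  A resulting timeout of 0 disables
 * coalescing on that index by setting the matching HC_DISABLE flag
 * instead.
 */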
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

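/*
 * TPA (transparent packet aggregation, the driver's LRO-style receive
 * aggregation) keeps a small pool of pre-allocated skbs per queue for
 * in-flight aggregations.  If the pool (or, later, the SGE ring) cannot
 * be fully allocated, TPA is simply disabled on that queue and regular
 * per-packet receive is used instead.
 */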
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				/* clear the mapping of this queue's entry
				 * (the original indexed queue 0 via bp->fp
				 * even inside the per-queue loop)
				 */
				dma_unmap_addr_set(&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
5411                 }
5412
5413                 fp->rx_bd_prod = ring_prod;
5414                 /* must not have more available CQEs than BDs */
5415                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5416                                        cqe_ring_prod);
5417                 fp->rx_pkt = fp->rx_calls = 0;
5418
5419                 /* Warning!
5420                  * This will generate an interrupt (to the TSTORM);
5421                  * it must only be done after the chip is initialized.
5422                  */
5423                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5424                                      fp->rx_sge_prod);
5425                 if (j != 0)
5426                         continue;
5427
5428                 REG_WR(bp, BAR_USTRORM_INTMEM +
5429                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5430                        U64_LO(fp->rx_comp_mapping));
5431                 REG_WR(bp, BAR_USTRORM_INTMEM +
5432                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5433                        U64_HI(fp->rx_comp_mapping));
5434         }
5435 }
5436
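/* Chain the last BD of each Tx ring page to the next page and reset the
 * per-queue Tx producer/consumer indices and doorbell data.
 */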
5437 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5438 {
5439         int i, j;
5440
5441         for_each_queue(bp, j) {
5442                 struct bnx2x_fastpath *fp = &bp->fp[j];
5443
5444                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5445                         struct eth_tx_next_bd *tx_next_bd =
5446                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5447
5448                         tx_next_bd->addr_hi =
5449                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5450                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5451                         tx_next_bd->addr_lo =
5452                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5453                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5454                 }
5455
5456                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5457                 fp->tx_db.data.zero_fill1 = 0;
5458                 fp->tx_db.data.prod = 0;
5459
5460                 fp->tx_pkt_prod = 0;
5461                 fp->tx_pkt_cons = 0;
5462                 fp->tx_bd_prod = 0;
5463                 fp->tx_bd_cons = 0;
5464                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5465                 fp->tx_pkt = 0;
5466         }
5467 }
5468
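/* Set up the slowpath (SPQ) ring and publish its page base address and
 * initial producer index to the XSTORM.
 */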
5469 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5470 {
5471         int func = BP_FUNC(bp);
5472
5473         spin_lock_init(&bp->spq_lock);
5474
5475         bp->spq_left = MAX_SPQ_PENDING;
5476         bp->spq_prod_idx = 0;
5477         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5478         bp->spq_prod_bd = bp->spq;
5479         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5480
5481         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5482                U64_LO(bp->spq_mapping));
5483         REG_WR(bp,
5484                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5485                U64_HI(bp->spq_mapping));
5486
5487         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5488                bp->spq_prod_idx);
5489 }
5490
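/* Fill in the per-connection Ethernet context: the Rx side (USTORM) gets
 * the status block indices, client ID, BD/SGE page bases and, when TPA is
 * enabled, the SGE buffer geometry; the Tx side (CSTORM/XSTORM) gets the
 * Tx CQ index, status block ID and the Tx BD page base.
 */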
5491 static void bnx2x_init_context(struct bnx2x *bp)
5492 {
5493         int i;
5494
5495         /* Rx */
5496         for_each_queue(bp, i) {
5497                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5498                 struct bnx2x_fastpath *fp = &bp->fp[i];
5499                 u8 cl_id = fp->cl_id;
5500
5501                 context->ustorm_st_context.common.sb_index_numbers =
5502                                                 BNX2X_RX_SB_INDEX_NUM;
5503                 context->ustorm_st_context.common.clientId = cl_id;
5504                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5505                 context->ustorm_st_context.common.flags =
5506                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5507                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5508                 context->ustorm_st_context.common.statistics_counter_id =
5509                                                 cl_id;
5510                 context->ustorm_st_context.common.mc_alignment_log_size =
5511                                                 BNX2X_RX_ALIGN_SHIFT;
5512                 context->ustorm_st_context.common.bd_buff_size =
5513                                                 bp->rx_buf_size;
5514                 context->ustorm_st_context.common.bd_page_base_hi =
5515                                                 U64_HI(fp->rx_desc_mapping);
5516                 context->ustorm_st_context.common.bd_page_base_lo =
5517                                                 U64_LO(fp->rx_desc_mapping);
5518                 if (!fp->disable_tpa) {
5519                         context->ustorm_st_context.common.flags |=
5520                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5521                         context->ustorm_st_context.common.sge_buff_size =
5522                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5523                                          (u32)0xffff);
5524                         context->ustorm_st_context.common.sge_page_base_hi =
5525                                                 U64_HI(fp->rx_sge_mapping);
5526                         context->ustorm_st_context.common.sge_page_base_lo =
5527                                                 U64_LO(fp->rx_sge_mapping);
5528
5529                         context->ustorm_st_context.common.max_sges_for_packet =
5530                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5531                         context->ustorm_st_context.common.max_sges_for_packet =
5532                                 ((context->ustorm_st_context.common.
5533                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5534                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5535                 }
5536
5537                 context->ustorm_ag_context.cdu_usage =
5538                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5539                                                CDU_REGION_NUMBER_UCM_AG,
5540                                                ETH_CONNECTION_TYPE);
5541
5542                 context->xstorm_ag_context.cdu_reserved =
5543                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5544                                                CDU_REGION_NUMBER_XCM_AG,
5545                                                ETH_CONNECTION_TYPE);
5546         }
5547
5548         /* Tx */
5549         for_each_queue(bp, i) {
5550                 struct bnx2x_fastpath *fp = &bp->fp[i];
5551                 struct eth_context *context =
5552                         bnx2x_sp(bp, context[i].eth);
5553
5554                 context->cstorm_st_context.sb_index_number =
5555                                                 C_SB_ETH_TX_CQ_INDEX;
5556                 context->cstorm_st_context.status_block_id = fp->sb_id;
5557
5558                 context->xstorm_st_context.tx_bd_page_base_hi =
5559                                                 U64_HI(fp->tx_desc_mapping);
5560                 context->xstorm_st_context.tx_bd_page_base_lo =
5561                                                 U64_LO(fp->tx_desc_mapping);
5562                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5563                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5564         }
5565 }
5566
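/* Program the TSTORM RSS indirection table: entry i is mapped round-robin
 * to client (leading cl_id + i % num_queues).
 */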
5567 static void bnx2x_init_ind_table(struct bnx2x *bp)
5568 {
5569         int func = BP_FUNC(bp);
5570         int i;
5571
5572         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5573                 return;
5574
5575         DP(NETIF_MSG_IFUP,
5576            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5577         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5578                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5579                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5580                         bp->fp->cl_id + (i % bp->num_queues));
5581 }
5582
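/* Write the per-client TSTORM configuration (MTU, statistics and VLAN
 * removal flags) for each queue's client ID.
 */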
5583 static void bnx2x_set_client_config(struct bnx2x *bp)
5584 {
5585         struct tstorm_eth_client_config tstorm_client = {0};
5586         int port = BP_PORT(bp);
5587         int i;
5588
5589         tstorm_client.mtu = bp->dev->mtu;
5590         tstorm_client.config_flags =
5591                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5592                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5593 #ifdef BCM_VLAN
5594         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5595                 tstorm_client.config_flags |=
5596                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5597                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5598         }
5599 #endif
5600
5601         for_each_queue(bp, i) {
5602                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5603
5604                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5605                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5606                        ((u32 *)&tstorm_client)[0]);
5607                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5608                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5609                        ((u32 *)&tstorm_client)[1]);
5610         }
5611
5612         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5613            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5614 }
5615
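/* Translate the driver rx_mode into the TSTORM MAC filtering masks and
 * the NIG LLH drop mask and push them to the chip.
 */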
5616 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5617 {
5618         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5619         int mode = bp->rx_mode;
5620         int mask = bp->rx_mode_cl_mask;
5621         int func = BP_FUNC(bp);
5622         int port = BP_PORT(bp);
5623         int i;
5624         /* All but management unicast packets should pass to the host as well */
5625         u32 llh_mask =
5626                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5627                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5628                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5629                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5630
5631         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5632
5633         switch (mode) {
5634         case BNX2X_RX_MODE_NONE: /* no Rx */
5635                 tstorm_mac_filter.ucast_drop_all = mask;
5636                 tstorm_mac_filter.mcast_drop_all = mask;
5637                 tstorm_mac_filter.bcast_drop_all = mask;
5638                 break;
5639
5640         case BNX2X_RX_MODE_NORMAL:
5641                 tstorm_mac_filter.bcast_accept_all = mask;
5642                 break;
5643
5644         case BNX2X_RX_MODE_ALLMULTI:
5645                 tstorm_mac_filter.mcast_accept_all = mask;
5646                 tstorm_mac_filter.bcast_accept_all = mask;
5647                 break;
5648
5649         case BNX2X_RX_MODE_PROMISC:
5650                 tstorm_mac_filter.ucast_accept_all = mask;
5651                 tstorm_mac_filter.mcast_accept_all = mask;
5652                 tstorm_mac_filter.bcast_accept_all = mask;
5653                 /* pass management unicast packets as well */
5654                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5655                 break;
5656
5657         default:
5658                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5659                 break;
5660         }
5661
5662         REG_WR(bp,
5663                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5664                llh_mask);
5665
5666         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5667                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5668                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5669                        ((u32 *)&tstorm_mac_filter)[i]);
5670
5671 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5672                    ((u32 *)&tstorm_mac_filter)[i]); */
5673         }
5674
5675         if (mode != BNX2X_RX_MODE_NONE)
5676                 bnx2x_set_client_config(bp);
5677 }
5678
5679 static void bnx2x_init_internal_common(struct bnx2x *bp)
5680 {
5681         int i;
5682
5683         /* Zero this manually as its initialization is
5684            currently missing in the initTool */
5685         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5686                 REG_WR(bp, BAR_USTRORM_INTMEM +
5687                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5688 }
5689
5690 static void bnx2x_init_internal_port(struct bnx2x *bp)
5691 {
5692         int port = BP_PORT(bp);
5693
5694         REG_WR(bp,
5695                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5696         REG_WR(bp,
5697                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5698         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5699         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5700 }
5701
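/* Per-function internal memory init: RSS/TPA configuration, per-client
 * statistics reset, statistics collection addresses, CQ page mapping and
 * aggregation size, dropless flow control thresholds and the rate
 * shaping/fairness context.
 */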
5702 static void bnx2x_init_internal_func(struct bnx2x *bp)
5703 {
5704         struct tstorm_eth_function_common_config tstorm_config = {0};
5705         struct stats_indication_flags stats_flags = {0};
5706         int port = BP_PORT(bp);
5707         int func = BP_FUNC(bp);
5708         int i, j;
5709         u32 offset;
5710         u16 max_agg_size;
5711
5712         if (is_multi(bp)) {
5713                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5714                 tstorm_config.rss_result_mask = MULTI_MASK;
5715         }
5716
5717         /* Enable TPA if needed */
5718         if (bp->flags & TPA_ENABLE_FLAG)
5719                 tstorm_config.config_flags |=
5720                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5721
5722         if (IS_E1HMF(bp))
5723                 tstorm_config.config_flags |=
5724                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5725
5726         tstorm_config.leading_client_id = BP_L_ID(bp);
5727
5728         REG_WR(bp, BAR_TSTRORM_INTMEM +
5729                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5730                (*(u32 *)&tstorm_config));
5731
5732         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5733         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5734         bnx2x_set_storm_rx_mode(bp);
5735
5736         for_each_queue(bp, i) {
5737                 u8 cl_id = bp->fp[i].cl_id;
5738
5739                 /* reset xstorm per client statistics */
5740                 offset = BAR_XSTRORM_INTMEM +
5741                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5742                 for (j = 0;
5743                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5744                         REG_WR(bp, offset + j*4, 0);
5745
5746                 /* reset tstorm per client statistics */
5747                 offset = BAR_TSTRORM_INTMEM +
5748                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5749                 for (j = 0;
5750                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5751                         REG_WR(bp, offset + j*4, 0);
5752
5753                 /* reset ustorm per client statistics */
5754                 offset = BAR_USTRORM_INTMEM +
5755                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5756                 for (j = 0;
5757                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5758                         REG_WR(bp, offset + j*4, 0);
5759         }
5760
5761         /* Init statistics related context */
5762         stats_flags.collect_eth = 1;
5763
5764         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5765                ((u32 *)&stats_flags)[0]);
5766         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5767                ((u32 *)&stats_flags)[1]);
5768
5769         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5770                ((u32 *)&stats_flags)[0]);
5771         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5772                ((u32 *)&stats_flags)[1]);
5773
5774         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5775                ((u32 *)&stats_flags)[0]);
5776         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5777                ((u32 *)&stats_flags)[1]);
5778
5779         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5780                ((u32 *)&stats_flags)[0]);
5781         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5782                ((u32 *)&stats_flags)[1]);
5783
5784         REG_WR(bp, BAR_XSTRORM_INTMEM +
5785                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5786                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5787         REG_WR(bp, BAR_XSTRORM_INTMEM +
5788                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5789                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5790
5791         REG_WR(bp, BAR_TSTRORM_INTMEM +
5792                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5793                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5794         REG_WR(bp, BAR_TSTRORM_INTMEM +
5795                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5796                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5797
5798         REG_WR(bp, BAR_USTRORM_INTMEM +
5799                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5800                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5801         REG_WR(bp, BAR_USTRORM_INTMEM +
5802                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5803                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5804
5805         if (CHIP_IS_E1H(bp)) {
5806                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5807                         IS_E1HMF(bp));
5808                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5809                         IS_E1HMF(bp));
5810                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5811                         IS_E1HMF(bp));
5812                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5813                         IS_E1HMF(bp));
5814
5815                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5816                          bp->e1hov);
5817         }
5818
5819         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5820         max_agg_size =
5821                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5822                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5823                     (u32)0xffff);
5824         for_each_queue(bp, i) {
5825                 struct bnx2x_fastpath *fp = &bp->fp[i];
5826
5827                 REG_WR(bp, BAR_USTRORM_INTMEM +
5828                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5829                        U64_LO(fp->rx_comp_mapping));
5830                 REG_WR(bp, BAR_USTRORM_INTMEM +
5831                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5832                        U64_HI(fp->rx_comp_mapping));
5833
5834                 /* Next page */
5835                 REG_WR(bp, BAR_USTRORM_INTMEM +
5836                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5837                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5838                 REG_WR(bp, BAR_USTRORM_INTMEM +
5839                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5840                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5841
5842                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5843                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5844                          max_agg_size);
5845         }
5846
5847         /* dropless flow control */
5848         if (CHIP_IS_E1H(bp)) {
5849                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5850
5851                 rx_pause.bd_thr_low = 250;
5852                 rx_pause.cqe_thr_low = 250;
5853                 rx_pause.cos = 1;
5854                 rx_pause.sge_thr_low = 0;
5855                 rx_pause.bd_thr_high = 350;
5856                 rx_pause.cqe_thr_high = 350;
5857                 rx_pause.sge_thr_high = 0;
5858
5859                 for_each_queue(bp, i) {
5860                         struct bnx2x_fastpath *fp = &bp->fp[i];
5861
5862                         if (!fp->disable_tpa) {
5863                                 rx_pause.sge_thr_low = 150;
5864                                 rx_pause.sge_thr_high = 250;
5865                         }
5866
5867
5868                         offset = BAR_USTRORM_INTMEM +
5869                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5870                                                                    fp->cl_id);
5871                         for (j = 0;
5872                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5873                              j++)
5874                                 REG_WR(bp, offset + j*4,
5875                                        ((u32 *)&rx_pause)[j]);
5876                 }
5877         }
5878
5879         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5880
5881         /* Init rate shaping and fairness contexts */
5882         if (IS_E1HMF(bp)) {
5883                 int vn;
5884
5885                 /* During init there is no active link.
5886                    Until link is up, set the link rate to 10Gbps */
5887                 bp->link_vars.line_speed = SPEED_10000;
5888                 bnx2x_init_port_minmax(bp);
5889
5890                 if (!BP_NOMCP(bp))
5891                         bp->mf_config =
5892                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5893                 bnx2x_calc_vn_weight_sum(bp);
5894
5895                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5896                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5897
5898                 /* Enable rate shaping and fairness */
5899                 bp->cmng.flags.cmng_enables |=
5900                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5901
5902         } else {
5903                 /* rate shaping and fairness are disabled */
5904                 DP(NETIF_MSG_IFUP,
5905                    "single function mode  minmax will be disabled\n");
5906         }
5907
5908
5909         /* Store it to internal memory */
5910         if (bp->port.pmf)
5911                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5912                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5913                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5914                                ((u32 *)(&bp->cmng))[i]);
5915 }
5916
5917 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5918 {
5919         switch (load_code) {
5920         case FW_MSG_CODE_DRV_LOAD_COMMON:
5921                 bnx2x_init_internal_common(bp);
5922                 /* no break */
5923
5924         case FW_MSG_CODE_DRV_LOAD_PORT:
5925                 bnx2x_init_internal_port(bp);
5926                 /* no break */
5927
5928         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5929                 bnx2x_init_internal_func(bp);
5930                 break;
5931
5932         default:
5933                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5934                 break;
5935         }
5936 }
5937
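/* Top-level NIC init: set up the fastpath structures and status blocks,
 * then the rings, contexts and internal memories, and finally enable
 * interrupts and check for a pending SPIO5 (fan failure) attention.
 */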
5938 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5939 {
5940         int i;
5941
5942         for_each_queue(bp, i) {
5943                 struct bnx2x_fastpath *fp = &bp->fp[i];
5944
5945                 fp->bp = bp;
5946                 fp->state = BNX2X_FP_STATE_CLOSED;
5947                 fp->index = i;
5948                 fp->cl_id = BP_L_ID(bp) + i;
5949 #ifdef BCM_CNIC
5950                 fp->sb_id = fp->cl_id + 1;
5951 #else
5952                 fp->sb_id = fp->cl_id;
5953 #endif
5954                 DP(NETIF_MSG_IFUP,
5955                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5956                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5957                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5958                               fp->sb_id);
5959                 bnx2x_update_fpsb_idx(fp);
5960         }
5961
5962         /* ensure status block indices were read */
5963         rmb();
5964
5965
5966         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5967                           DEF_SB_ID);
5968         bnx2x_update_dsb_idx(bp);
5969         bnx2x_update_coalesce(bp);
5970         bnx2x_init_rx_rings(bp);
5971         bnx2x_init_tx_ring(bp);
5972         bnx2x_init_sp_ring(bp);
5973         bnx2x_init_context(bp);
5974         bnx2x_init_internal(bp, load_code);
5975         bnx2x_init_ind_table(bp);
5976         bnx2x_stats_init(bp);
5977
5978         /* At this point, we are ready for interrupts */
5979         atomic_set(&bp->intr_sem, 0);
5980
5981         /* flush all before enabling interrupts */
5982         mb();
5983         mmiowb();
5984
5985         bnx2x_int_enable(bp);
5986
5987         /* Check for SPIO5 */
5988         bnx2x_attn_int_deasserted0(bp,
5989                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5990                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5991 }
5992
5993 /* end of nic init */
5994
5995 /*
5996  * gzip service functions
5997  */
5998
5999 static int bnx2x_gunzip_init(struct bnx2x *bp)
6000 {
6001         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6002                                             &bp->gunzip_mapping, GFP_KERNEL);
6003         if (bp->gunzip_buf  == NULL)
6004                 goto gunzip_nomem1;
6005
6006         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6007         if (bp->strm  == NULL)
6008                 goto gunzip_nomem2;
6009
6010         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6011                                       GFP_KERNEL);
6012         if (bp->strm->workspace == NULL)
6013                 goto gunzip_nomem3;
6014
6015         return 0;
6016
6017 gunzip_nomem3:
6018         kfree(bp->strm);
6019         bp->strm = NULL;
6020
6021 gunzip_nomem2:
6022         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6023                           bp->gunzip_mapping);
6024         bp->gunzip_buf = NULL;
6025
6026 gunzip_nomem1:
6027         netdev_err(bp->dev, "Cannot allocate firmware buffer for decompression\n");
6028         return -ENOMEM;
6029 }
6030
6031 static void bnx2x_gunzip_end(struct bnx2x *bp)
6032 {
6033         kfree(bp->strm->workspace);
6034
6035         kfree(bp->strm);
6036         bp->strm = NULL;
6037
6038         if (bp->gunzip_buf) {
6039                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6040                                   bp->gunzip_mapping);
6041                 bp->gunzip_buf = NULL;
6042         }
6043 }
6044
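/* Inflate a gzip image into bp->gunzip_buf: validate the two magic bytes
 * and the deflate method, skip the fixed 10-byte member header and the
 * optional NUL-terminated original-file-name field (other optional gzip
 * fields are not handled), then inflate the raw deflate stream - the
 * negative windowBits value tells zlib not to look for its own header.
 */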
6045 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6046 {
6047         int n, rc;
6048
6049         /* check gzip header */
6050         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6051                 BNX2X_ERR("Bad gzip header\n");
6052                 return -EINVAL;
6053         }
6054
6055         n = 10;
6056
6057 #define FNAME                           0x8
6058
6059         if (zbuf[3] & FNAME)
6060                 while ((zbuf[n++] != 0) && (n < len));
6061
6062         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6063         bp->strm->avail_in = len - n;
6064         bp->strm->next_out = bp->gunzip_buf;
6065         bp->strm->avail_out = FW_BUF_SIZE;
6066
6067         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6068         if (rc != Z_OK)
6069                 return rc;
6070
6071         rc = zlib_inflate(bp->strm, Z_FINISH);
6072         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6073                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6074                            bp->strm->msg);
6075
6076         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6077         if (bp->gunzip_outlen & 0x3)
6078                 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6079                            bp->gunzip_outlen);
6080         bp->gunzip_outlen >>= 2;
6081
6082         zlib_inflateEnd(bp->strm);
6083
6084         if (rc == Z_STREAM_END)
6085                 return 0;
6086
6087         return rc;
6088 }
6089
6090 /* nic load/unload */
6091
6092 /*
6093  * General service functions
6094  */
6095
6096 /* send a NIG loopback debug packet */
6097 static void bnx2x_lb_pckt(struct bnx2x *bp)
6098 {
6099         u32 wb_write[3];
6100
6101         /* Ethernet source and destination addresses */
6102         wb_write[0] = 0x55555555;
6103         wb_write[1] = 0x55555555;
6104         wb_write[2] = 0x20;             /* SOP */
6105         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6106
6107         /* NON-IP protocol */
6108         wb_write[0] = 0x09000000;
6109         wb_write[1] = 0x55555555;
6110         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6111         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6112 }
6113
6114 /* Some of the internal memories
6115  * are not directly readable from the driver;
6116  * to test them we send debug packets.
6117  */
6118 static int bnx2x_int_mem_test(struct bnx2x *bp)
6119 {
6120         int factor;
6121         int count, i;
6122         u32 val = 0;
6123
6124         if (CHIP_REV_IS_FPGA(bp))
6125                 factor = 120;
6126         else if (CHIP_REV_IS_EMUL(bp))
6127                 factor = 200;
6128         else
6129                 factor = 1;
6130
6131         DP(NETIF_MSG_HW, "start part1\n");
6132
6133         /* Disable inputs of parser neighbor blocks */
6134         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6135         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6136         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6137         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6138
6139         /*  Write 0 to parser credits for CFC search request */
6140         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6141
6142         /* send Ethernet packet */
6143         bnx2x_lb_pckt(bp);
6144
6145         /* TODO: should the NIG statistics be reset here? */
6146         /* Wait until NIG register shows 1 packet of size 0x10 */
6147         count = 1000 * factor;
6148         while (count) {
6149
6150                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6151                 val = *bnx2x_sp(bp, wb_data[0]);
6152                 if (val == 0x10)
6153                         break;
6154
6155                 msleep(10);
6156                 count--;
6157         }
6158         if (val != 0x10) {
6159                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6160                 return -1;
6161         }
6162
6163         /* Wait until PRS register shows 1 packet */
6164         count = 1000 * factor;
6165         while (count) {
6166                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6167                 if (val == 1)
6168                         break;
6169
6170                 msleep(10);
6171                 count--;
6172         }
6173         if (val != 0x1) {
6174                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6175                 return -2;
6176         }
6177
6178         /* Reset and init BRB, PRS */
6179         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6180         msleep(50);
6181         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6182         msleep(50);
6183         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6184         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6185
6186         DP(NETIF_MSG_HW, "part2\n");
6187
6188         /* Disable inputs of parser neighbor blocks */
6189         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6190         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6191         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6192         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6193
6194         /* Write 0 to parser credits for CFC search request */
6195         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6196
6197         /* send 10 Ethernet packets */
6198         for (i = 0; i < 10; i++)
6199                 bnx2x_lb_pckt(bp);
6200
6201         /* Wait until NIG register shows 10 + 1
6202            packets of size 11*0x10 = 0xb0 */
6203         count = 1000 * factor;
6204         while (count) {
6205
6206                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6207                 val = *bnx2x_sp(bp, wb_data[0]);
6208                 if (val == 0xb0)
6209                         break;
6210
6211                 msleep(10);
6212                 count--;
6213         }
6214         if (val != 0xb0) {
6215                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6216                 return -3;
6217         }
6218
6219         /* Wait until PRS register shows 2 packets */
6220         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6221         if (val != 2)
6222                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6223
6224         /* Write 1 to parser credits for CFC search request */
6225         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6226
6227         /* Wait until PRS register shows 3 packets */
6228         msleep(10 * factor);
6229         /* the PRS packet counter should now show 3 packets */
6230         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6231         if (val != 3)
6232                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6233
6234         /* clear NIG EOP FIFO */
6235         for (i = 0; i < 11; i++)
6236                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6237         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6238         if (val != 1) {
6239                 BNX2X_ERR("clear of NIG failed\n");
6240                 return -4;
6241         }
6242
6243         /* Reset and init BRB, PRS, NIG */
6244         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6245         msleep(50);
6246         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6247         msleep(50);
6248         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6249         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6250 #ifndef BCM_CNIC
6251         /* set NIC mode */
6252         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6253 #endif
6254
6255         /* Enable inputs of parser neighbor blocks */
6256         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6257         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6258         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6259         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6260
6261         DP(NETIF_MSG_HW, "done\n");
6262
6263         return 0; /* OK */
6264 }
6265
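/* Unmask the attention interrupts of the HW blocks (writing 0 unmasks);
 * the commented-out SEM/MISC masks are deliberately left untouched.
 */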
6266 static void enable_blocks_attention(struct bnx2x *bp)
6267 {
6268         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6269         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6270         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6271         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6272         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6273         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6274         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6275         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6276         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6277 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6278 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6279         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6280         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6281         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6282 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6283 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6284         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6285         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6286         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6287         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6288 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6289 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6290         if (CHIP_REV_IS_FPGA(bp))
6291                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6292         else
6293                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6294         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6295         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6296         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6297 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6298 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6299         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6300         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6301 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6302         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
6303 }
6304
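/* Per-block parity mask values: set bits stay masked (see the inline bit
 * comments), a zero enables all parity attentions for that block.
 */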
6305 static const struct {
6306         u32 addr;
6307         u32 mask;
6308 } bnx2x_parity_mask[] = {
6309         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6310         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6311         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6312         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6313         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6314         {QM_REG_QM_PRTY_MASK, 0x0},
6315         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6316         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6317         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6318         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6319         {CDU_REG_CDU_PRTY_MASK, 0x0},
6320         {CFC_REG_CFC_PRTY_MASK, 0x0},
6321         {DBG_REG_DBG_PRTY_MASK, 0x0},
6322         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6323         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6324         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6325         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6326         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6327         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6328         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6329         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6330         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6331         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6332         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6333         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6334         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6335         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6336         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6337 };
6338
6339 static void enable_blocks_parity(struct bnx2x *bp)
6340 {
6341         int i;
6342         int mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6343
6344         for (i = 0; i < mask_arr_len; i++)
6345                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6346                         bnx2x_parity_mask[i].mask);
6347 }
6348
6349
6350 static void bnx2x_reset_common(struct bnx2x *bp)
6351 {
6352         /* reset_common */
6353         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6354                0xd3ffff7f);
6355         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6356 }
6357
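/* Derive the PXP arbiter write order from the PCIe max payload size and
 * the read order from the max read request size, unless bp->mrrs forces
 * the read order.
 */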
6358 static void bnx2x_init_pxp(struct bnx2x *bp)
6359 {
6360         u16 devctl;
6361         int r_order, w_order;
6362
6363         pci_read_config_word(bp->pdev,
6364                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6365         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6366         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6367         if (bp->mrrs == -1)
6368                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6369         else {
6370                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6371                 r_order = bp->mrrs;
6372         }
6373
6374         bnx2x_init_pxp_arb(bp, r_order, w_order);
6375 }
6376
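/* Configure SPIO 5 as an active-low fan failure input routed to the IGU
 * when the shared HW configuration (or the external PHY type) indicates
 * that fan failure detection is required.
 */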
6377 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6378 {
6379         u32 val;
6380         u8 port;
6381         u8 is_required = 0;
6382
6383         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6384               SHARED_HW_CFG_FAN_FAILURE_MASK;
6385
6386         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6387                 is_required = 1;
6388
6389         /*
6390          * The fan failure mechanism is usually related to the PHY type since
6391          * the power consumption of the board is affected by the PHY. Currently,
6392          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6393          */
6394         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6395                 for (port = PORT_0; port < PORT_MAX; port++) {
6396                         u32 phy_type =
6397                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6398                                          external_phy_config) &
6399                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6400                         is_required |=
6401                                 ((phy_type ==
6402                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6403                                  (phy_type ==
6404                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6405                                  (phy_type ==
6406                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6407                 }
6408
6409         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6410
6411         if (is_required == 0)
6412                 return;
6413
6414         /* Fan failure is indicated by SPIO 5 */
6415         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6416                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6417
6418         /* set to active low mode */
6419         val = REG_RD(bp, MISC_REG_SPIO_INT);
6420         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6421                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6422         REG_WR(bp, MISC_REG_SPIO_INT, val);
6423
6424         /* enable interrupt to signal the IGU */
6425         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6426         val |= (1 << MISC_REGISTERS_SPIO_5);
6427         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6428 }
6429
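/* One-time (common) HW init: reset and bring up the chip-common blocks,
 * run the internal memory self test on the first load since power-up and
 * initialize the common PHY.
 */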
6430 static int bnx2x_init_common(struct bnx2x *bp)
6431 {
6432         u32 val, i;
6433 #ifdef BCM_CNIC
6434         u32 wb_write[2];
6435 #endif
6436
6437         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6438
6439         bnx2x_reset_common(bp);
6440         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6441         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6442
6443         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6444         if (CHIP_IS_E1H(bp))
6445                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6446
6447         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6448         msleep(30);
6449         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6450
6451         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6452         if (CHIP_IS_E1(bp)) {
6453                 /* enable HW interrupt from PXP on USDM overflow
6454                    bit 16 on INT_MASK_0 */
6455                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6456         }
6457
6458         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6459         bnx2x_init_pxp(bp);
6460
6461 #ifdef __BIG_ENDIAN
6462         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6463         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6464         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6465         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6466         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6467         /* make sure this value is 0 */
6468         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6469
6470 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6471         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6472         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6473         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6474         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6475 #endif
6476
6477         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6478 #ifdef BCM_CNIC
6479         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6480         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6481         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6482 #endif
6483
6484         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6485                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6486
6487         /* let the HW do its magic ... */
6488         msleep(100);
6489         /* finish PXP init */
6490         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6491         if (val != 1) {
6492                 BNX2X_ERR("PXP2 CFG failed\n");
6493                 return -EBUSY;
6494         }
6495         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6496         if (val != 1) {
6497                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6498                 return -EBUSY;
6499         }
6500
6501         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6502         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6503
6504         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6505
6506         /* clean the DMAE memory */
6507         bp->dmae_ready = 1;
6508         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6509
6510         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6511         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6512         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6513         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6514
6515         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6516         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6517         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6518         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6519
6520         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6521
6522 #ifdef BCM_CNIC
6523         wb_write[0] = 0;
6524         wb_write[1] = 0;
6525         for (i = 0; i < 64; i++) {
6526                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6527                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6528
6529                 if (CHIP_IS_E1H(bp)) {
6530                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6531                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6532                                           wb_write, 2);
6533                 }
6534         }
6535 #endif
6536         /* soft reset pulse */
6537         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6538         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6539
6540 #ifdef BCM_CNIC
6541         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6542 #endif
6543
6544         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6545         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6546         if (!CHIP_REV_IS_SLOW(bp)) {
6547                 /* enable hw interrupt from doorbell Q */
6548                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6549         }
6550
6551         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6552         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6553         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6554 #ifndef BCM_CNIC
6555         /* set NIC mode */
6556         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6557 #endif
6558         if (CHIP_IS_E1H(bp))
6559                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6560
6561         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6562         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6563         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6564         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6565
6566         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6567         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6568         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6569         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6570
6571         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6572         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6573         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6574         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6575
6576         /* sync semi rtc */
6577         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6578                0x80000000);
6579         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6580                0x80000000);
6581
6582         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6583         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6584         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6585
6586         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6587         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6588                 REG_WR(bp, i, 0xc0cac01a);
6589                 /* TODO: replace with something meaningful */
6590         }
6591         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6592 #ifdef BCM_CNIC
6593         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6594         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6595         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6596         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6597         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6598         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6599         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6600         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6601         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6602         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6603 #endif
6604         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6605
6606         if (sizeof(union cdu_context) != 1024)
6607                 /* we currently assume that a context is 1024 bytes */
6608                 pr_alert("please adjust the size of cdu_context(%ld)\n",
6609                          (long)sizeof(union cdu_context));
6610
6611         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6612         val = (4 << 24) + (0 << 12) + 1024;
6613         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6614
6615         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6616         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6617         /* enable context validation interrupt from CFC */
6618         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6619
6620         /* set the thresholds to prevent CFC/CDU race */
6621         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6622
6623         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6624         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6625
6626         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6627         /* Reset PCIE errors for debug */
6628         REG_WR(bp, 0x2814, 0xffffffff);
6629         REG_WR(bp, 0x3820, 0xffffffff);
6630
6631         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6632         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6633         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6634         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6635
6636         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6637         if (CHIP_IS_E1H(bp)) {
6638                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6639                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6640         }
6641
6642         if (CHIP_REV_IS_SLOW(bp))
6643                 msleep(200);
6644
6645         /* finish CFC init */
6646         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6647         if (val != 1) {
6648                 BNX2X_ERR("CFC LL_INIT failed\n");
6649                 return -EBUSY;
6650         }
6651         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6652         if (val != 1) {
6653                 BNX2X_ERR("CFC AC_INIT failed\n");
6654                 return -EBUSY;
6655         }
6656         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6657         if (val != 1) {
6658                 BNX2X_ERR("CFC CAM_INIT failed\n");
6659                 return -EBUSY;
6660         }
6661         REG_WR(bp, CFC_REG_DEBUG0, 0);
6662
6663         /* read the NIG statistic to see whether this is
6664            the first load since power-up */
6665         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6666         val = *bnx2x_sp(bp, wb_data[0]);
6667
6668         /* do internal memory self test */
6669         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6670                 BNX2X_ERR("internal mem self test failed\n");
6671                 return -EBUSY;
6672         }
6673
6674         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6675         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6676         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6677         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6678         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6679                 bp->port.need_hw_lock = 1;
6680                 break;
6681
6682         default:
6683                 break;
6684         }
6685
6686         bnx2x_setup_fan_failure_detection(bp);
6687
6688         /* clear PXP2 attentions */
6689         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6690
6691         enable_blocks_attention(bp);
6692         if (CHIP_PARITY_SUPPORTED(bp))
6693                 enable_blocks_parity(bp);
6694
6695         if (!BP_NOMCP(bp)) {
6696                 bnx2x_acquire_phy_lock(bp);
6697                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6698                 bnx2x_release_phy_lock(bp);
6699         } else
6700                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6701
6702         return 0;
6703 }
6704
6705 static int bnx2x_init_port(struct bnx2x *bp)
6706 {
6707         int port = BP_PORT(bp);
6708         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6709         u32 low, high;
6710         u32 val;
6711
6712         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6713
6714         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6715
6716         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6717         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6718
6719         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6720         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6721         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6722         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6723
6724 #ifdef BCM_CNIC
6725         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6726
6727         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6728         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6729         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6730 #endif
6731         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6732
6733         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
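             /* the BRB pause thresholds computed below are in units of
              * 256-byte blocks (note the /256 factors in the
              * derivations); the values depend on port mode and MTU
              */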
6734         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6735                 /* no pause for emulation and FPGA */
6736                 low = 0;
6737                 high = 513;
6738         } else {
6739                 if (IS_E1HMF(bp))
6740                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6741                 else if (bp->dev->mtu > 4096) {
6742                         if (bp->flags & ONE_PORT_FLAG)
6743                                 low = 160;
6744                         else {
6745                                 val = bp->dev->mtu;
6746                                 /* (24*1024 + val*4)/256 = 96 + ceil(val/64) */
6747                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6748                         }
6749                 } else
6750                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6751                 high = low + 56;        /* 14*1024/256 */
6752         }
6753         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6754         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6755
6756
6757         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6758
6759         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6760         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6761         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6762         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6763
6764         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6765         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6766         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6767         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6768
6769         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6770         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6771
6772         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6773
6774         /* configure PBF to work without PAUSE mtu 9000 */
6775         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6776
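             /* assumption: the ARB threshold and init credit below are
              * in 16-byte units, 9040 covering a 9000-byte MTU frame
              * plus header overhead
              */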
6777         /* update threshold */
6778         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6779         /* update init credit */
6780         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6781
6782         /* probe changes */
6783         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6784         msleep(5);
6785         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6786
6787 #ifdef BCM_CNIC
6788         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6789 #endif
6790         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6791         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6792
6793         if (CHIP_IS_E1(bp)) {
6794                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6795                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6796         }
6797         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6798
6799         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6800         /* init aeu_mask_attn_func_0/1:
6801          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6802          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6803          *             bits 4-7 are used for "per vn group attention" */
6804         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6805                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6806
6807         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6808         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6809         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6810         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6811         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6812
6813         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6814
6815         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6816
6817         if (CHIP_IS_E1H(bp)) {
6818                 /* 0x2 disable e1hov, 0x1 enable */
6819                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6820                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6821
6822                 {
6823                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6824                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6825                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6826                 }
6827         }
6828
6829         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6830         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6831
6832         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6833         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6834                 {
6835                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6836
6837                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6838                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6839
6840                 /* The GPIO should be swapped if the swap register is
6841                    set and active */
6842                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6843                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6844
6845                 /* Select function upon port-swap configuration */
6846                 if (port == 0) {
6847                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6848                         aeu_gpio_mask = (swap_val && swap_override) ?
6849                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6850                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6851                 } else {
6852                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6853                         aeu_gpio_mask = (swap_val && swap_override) ?
6854                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6855                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6856                 }
6857                 val = REG_RD(bp, offset);
6858                 /* add GPIO3 to group */
6859                 val |= aeu_gpio_mask;
6860                 REG_WR(bp, offset, val);
6861                 }
6862                 break;
6863
6864         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6865         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6866                 /* add SPIO 5 to group 0 */
6867                 {
6868                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6869                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6870                 val = REG_RD(bp, reg_addr);
6871                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6872                 REG_WR(bp, reg_addr, val);
6873                 }
6874                 break;
6875
6876         default:
6877                 break;
6878         }
6879
6880         bnx2x__link_reset(bp);
6881
6882         return 0;
6883 }
6884
6885 #define ILT_PER_FUNC            (768/2)
6886 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6887 /* the phys address is shifted right 12 bits and a 1=valid bit is
6888    added at the 53rd bit;
6889    then, since this is a wide register(TM),
6890    we split it into two 32-bit writes
6891  */
6892 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6893 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
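     /* e.g. addr = 0x123456000 gives ONCHIP_ADDR1 = 0x00123456 (address
        bits 12-43) and ONCHIP_ADDR2 = 0x00100000 (valid bit only) */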
6894 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6895 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6896
6897 #ifdef BCM_CNIC
6898 #define CNIC_ILT_LINES          127
6899 #define CNIC_CTX_PER_ILT        16
6900 #else
6901 #define CNIC_ILT_LINES          0
6902 #endif
6903
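     /* write a single ILT entry: the encoded on-chip address is split
        into two 32-bit halves (the AT tables are wide registers) */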
6904 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6905 {
6906         int reg;
6907
6908         if (CHIP_IS_E1H(bp))
6909                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6910         else /* E1 */
6911                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6912
6913         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6914 }
6915
6916 static int bnx2x_init_func(struct bnx2x *bp)
6917 {
6918         int port = BP_PORT(bp);
6919         int func = BP_FUNC(bp);
6920         u32 addr, val;
6921         int i;
6922
6923         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6924
6925         /* set MSI reconfigure capability */
6926         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6927         val = REG_RD(bp, addr);
6928         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6929         REG_WR(bp, addr, val);
6930
6931         i = FUNC_ILT_BASE(func);
6932
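             /* ILT layout for this function: the CDU context line(s)
              * come first, followed (when CNIC is built in) by the
              * timers, QM and searcher lines programmed below
              */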
6933         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6934         if (CHIP_IS_E1H(bp)) {
6935                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6936                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6937         } else /* E1 */
6938                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6939                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6940
6941 #ifdef BCM_CNIC
6942         i += 1 + CNIC_ILT_LINES;
6943         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6944         if (CHIP_IS_E1(bp))
6945                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6946         else {
6947                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6948                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6949         }
6950
6951         i++;
6952         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6953         if (CHIP_IS_E1(bp))
6954                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6955         else {
6956                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6957                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6958         }
6959
6960         i++;
6961         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6962         if (CHIP_IS_E1(bp))
6963                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6964         else {
6965                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6966                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6967         }
6968
6969         /* tell the searcher where the T2 table is */
6970         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6971
6972         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6973                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6974
6975         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6976                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6977                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6978
6979         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6980 #endif
6981
6982         if (CHIP_IS_E1H(bp)) {
6983                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6984                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6985                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6986                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6987                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6988                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6989                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6990                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6991                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6992
6993                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6994                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6995         }
6996
6997         /* HC init per function */
6998         if (CHIP_IS_E1H(bp)) {
6999                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7000
7001                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7002                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7003         }
7004         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7005
7006         /* Reset PCIE errors for debug */
7007         REG_WR(bp, 0x2114, 0xffffffff);
7008         REG_WR(bp, 0x2120, 0xffffffff);
7009
7010         return 0;
7011 }
7012
7013 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7014 {
7015         int i, rc = 0;
7016
7017         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7018            BP_FUNC(bp), load_code);
7019
7020         bp->dmae_ready = 0;
7021         mutex_init(&bp->dmae_mutex);
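             /* DMAE cannot be used until the relevant init stages have
              * run; dmae_ready is raised below once the PORT/FUNCTION
              * stages may rely on it
              */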
7022         rc = bnx2x_gunzip_init(bp);
7023         if (rc)
7024                 return rc;
7025
7026         switch (load_code) {
7027         case FW_MSG_CODE_DRV_LOAD_COMMON:
7028                 rc = bnx2x_init_common(bp);
7029                 if (rc)
7030                         goto init_hw_err;
7031                 /* no break */
7032
7033         case FW_MSG_CODE_DRV_LOAD_PORT:
7034                 bp->dmae_ready = 1;
7035                 rc = bnx2x_init_port(bp);
7036                 if (rc)
7037                         goto init_hw_err;
7038                 /* no break */
7039
7040         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7041                 bp->dmae_ready = 1;
7042                 rc = bnx2x_init_func(bp);
7043                 if (rc)
7044                         goto init_hw_err;
7045                 break;
7046
7047         default:
7048                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7049                 break;
7050         }
7051
7052         if (!BP_NOMCP(bp)) {
7053                 int func = BP_FUNC(bp);
7054
7055                 bp->fw_drv_pulse_wr_seq =
7056                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7057                                  DRV_PULSE_SEQ_MASK);
7058                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7059         }
7060
7061         /* this needs to be done before gunzip end */
7062         bnx2x_zero_def_sb(bp);
7063         for_each_queue(bp, i)
7064                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7065 #ifdef BCM_CNIC
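             /* for_each_queue() leaves i one past the last ethernet
                queue, so this zeroes the CNIC status block that
                follows them */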
7066         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7067 #endif
7068
7069 init_hw_err:
7070         bnx2x_gunzip_end(bp);
7071
7072         return rc;
7073 }
7074
7075 static void bnx2x_free_mem(struct bnx2x *bp)
7076 {
7077
7078 #define BNX2X_PCI_FREE(x, y, size) \
7079         do { \
7080                 if (x) { \
7081                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7082                         x = NULL; \
7083                         y = 0; \
7084                 } \
7085         } while (0)
7086
7087 #define BNX2X_FREE(x) \
7088         do { \
7089                 if (x) { \
7090                         vfree(x); \
7091                         x = NULL; \
7092                 } \
7093         } while (0)
7094
7095         int i;
7096
7097         /* fastpath */
7098         /* Common */
7099         for_each_queue(bp, i) {
7100
7101                 /* status blocks */
7102                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7103                                bnx2x_fp(bp, i, status_blk_mapping),
7104                                sizeof(struct host_status_block));
7105         }
7106         /* Rx */
7107         for_each_queue(bp, i) {
7108
7109                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7110                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7111                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7112                                bnx2x_fp(bp, i, rx_desc_mapping),
7113                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7114
7115                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7116                                bnx2x_fp(bp, i, rx_comp_mapping),
7117                                sizeof(struct eth_fast_path_rx_cqe) *
7118                                NUM_RCQ_BD);
7119
7120                 /* SGE ring */
7121                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7122                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7123                                bnx2x_fp(bp, i, rx_sge_mapping),
7124                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7125         }
7126         /* Tx */
7127         for_each_queue(bp, i) {
7128
7129                 /* fastpath tx rings: tx_buf tx_desc */
7130                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7131                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7132                                bnx2x_fp(bp, i, tx_desc_mapping),
7133                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7134         }
7135         /* end of fastpath */
7136
7137         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7138                        sizeof(struct host_def_status_block));
7139
7140         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7141                        sizeof(struct bnx2x_slowpath));
7142
7143 #ifdef BCM_CNIC
7144         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7145         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7146         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7147         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7148         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7149                        sizeof(struct host_status_block));
7150 #endif
7151         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7152
7153 #undef BNX2X_PCI_FREE
7154 #undef BNX2X_FREE
7155 }
7156
7157 static int bnx2x_alloc_mem(struct bnx2x *bp)
7158 {
7159
7160 #define BNX2X_PCI_ALLOC(x, y, size) \
7161         do { \
7162                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7163                 if (x == NULL) \
7164                         goto alloc_mem_err; \
7165                 memset(x, 0, size); \
7166         } while (0)
7167
7168 #define BNX2X_ALLOC(x, size) \
7169         do { \
7170                 x = vmalloc(size); \
7171                 if (x == NULL) \
7172                         goto alloc_mem_err; \
7173                 memset(x, 0, size); \
7174         } while (0)
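     /* both helpers jump to alloc_mem_err on failure, where
        bnx2x_free_mem() unwinds whatever was already allocated (its
        free helpers tolerate NULL pointers) */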
7175
7176         int i;
7177
7178         /* fastpath */
7179         /* Common */
7180         for_each_queue(bp, i) {
7181                 bnx2x_fp(bp, i, bp) = bp;
7182
7183                 /* status blocks */
7184                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7185                                 &bnx2x_fp(bp, i, status_blk_mapping),
7186                                 sizeof(struct host_status_block));
7187         }
7188         /* Rx */
7189         for_each_queue(bp, i) {
7190
7191                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7192                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7193                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7194                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7195                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7196                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7197
7198                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7199                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7200                                 sizeof(struct eth_fast_path_rx_cqe) *
7201                                 NUM_RCQ_BD);
7202
7203                 /* SGE ring */
7204                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7205                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7206                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7207                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7208                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7209         }
7210         /* Tx */
7211         for_each_queue(bp, i) {
7212
7213                 /* fastpath tx rings: tx_buf tx_desc */
7214                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7215                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7216                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7217                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7218                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7219         }
7220         /* end of fastpath */
7221
7222         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7223                         sizeof(struct host_def_status_block));
7224
7225         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7226                         sizeof(struct bnx2x_slowpath));
7227
7228 #ifdef BCM_CNIC
7229         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7230
7231         /* allocate the searcher T2 table:
7232            we allocate 1/4 of the connection count for T2
7233            (T2 itself is not entered into the ILT) */
7234         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7235
7236         /* Initialize T2 (for 1024 connections) */
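             /* chain the 64-byte T2 entries into a free list: the last
                8 bytes of each entry hold the physical address of the
                next one */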
7237         for (i = 0; i < 16*1024; i += 64)
7238                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7239
7240         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7241         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7242
7243         /* QM queues (128*MAX_CONN) */
7244         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7245
7246         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7247                         sizeof(struct host_status_block));
7248 #endif
7249
7250         /* Slow path ring */
7251         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7252
7253         return 0;
7254
7255 alloc_mem_err:
7256         bnx2x_free_mem(bp);
7257         return -ENOMEM;
7258
7259 #undef BNX2X_PCI_ALLOC
7260 #undef BNX2X_ALLOC
7261 }
7262
7263 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7264 {
7265         int i;
7266
7267         for_each_queue(bp, i) {
7268                 struct bnx2x_fastpath *fp = &bp->fp[i];
7269
7270                 u16 bd_cons = fp->tx_bd_cons;
7271                 u16 sw_prod = fp->tx_pkt_prod;
7272                 u16 sw_cons = fp->tx_pkt_cons;
7273
7274                 while (sw_cons != sw_prod) {
7275                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7276                         sw_cons++;
7277                 }
7278         }
7279 }
7280
7281 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7282 {
7283         int i, j;
7284
7285         for_each_queue(bp, j) {
7286                 struct bnx2x_fastpath *fp = &bp->fp[j];
7287
7288                 for (i = 0; i < NUM_RX_BD; i++) {
7289                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7290                         struct sk_buff *skb = rx_buf->skb;
7291
7292                         if (skb == NULL)
7293                                 continue;
7294
7295                         dma_unmap_single(&bp->pdev->dev,
7296                                          dma_unmap_addr(rx_buf, mapping),
7297                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7298
7299                         rx_buf->skb = NULL;
7300                         dev_kfree_skb(skb);
7301                 }
7302                 if (!fp->disable_tpa)
7303                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7304                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7305                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7306         }
7307 }
7308
7309 static void bnx2x_free_skbs(struct bnx2x *bp)
7310 {
7311         bnx2x_free_tx_skbs(bp);
7312         bnx2x_free_rx_skbs(bp);
7313 }
7314
7315 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7316 {
7317         int i, offset = 1;
7318
7319         free_irq(bp->msix_table[0].vector, bp->dev);
7320         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7321            bp->msix_table[0].vector);
7322
7323 #ifdef BCM_CNIC
7324         offset++;
7325 #endif
7326         for_each_queue(bp, i) {
7327                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7328                    "state %x\n", i, bp->msix_table[i + offset].vector,
7329                    bnx2x_fp(bp, i, state));
7330
7331                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7332         }
7333 }
7334
7335 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7336 {
7337         if (bp->flags & USING_MSIX_FLAG) {
7338                 if (!disable_only)
7339                         bnx2x_free_msix_irqs(bp);
7340                 pci_disable_msix(bp->pdev);
7341                 bp->flags &= ~USING_MSIX_FLAG;
7342
7343         } else if (bp->flags & USING_MSI_FLAG) {
7344                 if (!disable_only)
7345                         free_irq(bp->pdev->irq, bp->dev);
7346                 pci_disable_msi(bp->pdev);
7347                 bp->flags &= ~USING_MSI_FLAG;
7348
7349         } else if (!disable_only)
7350                 free_irq(bp->pdev->irq, bp->dev);
7351 }
7352
7353 static int bnx2x_enable_msix(struct bnx2x *bp)
7354 {
7355         int i, rc, offset = 1;
7356         int igu_vec = 0;
7357
7358         bp->msix_table[0].entry = igu_vec;
7359         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7360
7361 #ifdef BCM_CNIC
7362         igu_vec = BP_L_ID(bp) + offset;
7363         bp->msix_table[1].entry = igu_vec;
7364         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7365         offset++;
7366 #endif
7367         for_each_queue(bp, i) {
7368                 igu_vec = BP_L_ID(bp) + offset + i;
7369                 bp->msix_table[i + offset].entry = igu_vec;
7370                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7371                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7372         }
7373
7374         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7375                              BNX2X_NUM_QUEUES(bp) + offset);
7376         if (rc) {
7377                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7378                 return rc;
7379         }
7380
7381         bp->flags |= USING_MSIX_FLAG;
7382
7383         return 0;
7384 }
7385
7386 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7387 {
7388         int i, rc, offset = 1;
7389
7390         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7391                          bp->dev->name, bp->dev);
7392         if (rc) {
7393                 BNX2X_ERR("request sp irq failed\n");
7394                 return -EBUSY;
7395         }
7396
7397 #ifdef BCM_CNIC
7398         offset++;
7399 #endif
7400         for_each_queue(bp, i) {
7401                 struct bnx2x_fastpath *fp = &bp->fp[i];
7402                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7403                          bp->dev->name, i);
7404
7405                 rc = request_irq(bp->msix_table[i + offset].vector,
7406                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7407                 if (rc) {
7408                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7409                         bnx2x_free_msix_irqs(bp);
7410                         return -EBUSY;
7411                 }
7412
7413                 fp->state = BNX2X_FP_STATE_IRQ;
7414         }
7415
7416         i = BNX2X_NUM_QUEUES(bp);
7417         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
7418                     bp->msix_table[0].vector,
7419                     0, bp->msix_table[offset].vector,
7420                     i - 1, bp->msix_table[offset + i - 1].vector);
7421
7422         return 0;
7423 }
7424
7425 static int bnx2x_enable_msi(struct bnx2x *bp)
7426 {
7427         int rc;
7428
7429         rc = pci_enable_msi(bp->pdev);
7430         if (rc) {
7431                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7432                 return -1;
7433         }
7434         bp->flags |= USING_MSI_FLAG;
7435
7436         return 0;
7437 }
7438
7439 static int bnx2x_req_irq(struct bnx2x *bp)
7440 {
7441         unsigned long flags;
7442         int rc;
7443
7444         if (bp->flags & USING_MSI_FLAG)
7445                 flags = 0;
7446         else
7447                 flags = IRQF_SHARED;
7448
7449         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7450                          bp->dev->name, bp->dev);
7451         if (!rc)
7452                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7453
7454         return rc;
7455 }
7456
7457 static void bnx2x_napi_enable(struct bnx2x *bp)
7458 {
7459         int i;
7460
7461         for_each_queue(bp, i)
7462                 napi_enable(&bnx2x_fp(bp, i, napi));
7463 }
7464
7465 static void bnx2x_napi_disable(struct bnx2x *bp)
7466 {
7467         int i;
7468
7469         for_each_queue(bp, i)
7470                 napi_disable(&bnx2x_fp(bp, i, napi));
7471 }
7472
7473 static void bnx2x_netif_start(struct bnx2x *bp)
7474 {
7475         int intr_sem;
7476
7477         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7478         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
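             /* intr_sem is a nesting count: only the decrement that
                reaches zero re-enables NAPI and interrupts */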
7479
7480         if (intr_sem) {
7481                 if (netif_running(bp->dev)) {
7482                         bnx2x_napi_enable(bp);
7483                         bnx2x_int_enable(bp);
7484                         if (bp->state == BNX2X_STATE_OPEN)
7485                                 netif_tx_wake_all_queues(bp->dev);
7486                 }
7487         }
7488 }
7489
7490 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7491 {
7492         bnx2x_int_disable_sync(bp, disable_hw);
7493         bnx2x_napi_disable(bp);
7494         netif_tx_disable(bp->dev);
7495 }
7496
7497 /*
7498  * Init service functions
7499  */
7500
7501 /**
7502  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7503  *
7504  * @param bp driver descriptor
7505  * @param set set or clear an entry (1 or 0)
7506  * @param mac pointer to a buffer containing a MAC
7507  * @param cl_bit_vec bit vector of clients to register a MAC for
7508  * @param cam_offset offset in a CAM to use
7509  * @param with_bcast set broadcast MAC as well
7510  */
7511 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7512                                       u32 cl_bit_vec, u8 cam_offset,
7513                                       u8 with_bcast)
7514 {
7515         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7516         int port = BP_PORT(bp);
7517
7518         /* CAM allocation
7519          * unicasts 0-31:port0 32-63:port1
7520          * multicast 64-127:port0 128-191:port1
7521          */
7522         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7523         config->hdr.offset = cam_offset;
7524         config->hdr.client_id = 0xff;
7525         config->hdr.reserved1 = 0;
7526
7527         /* primary MAC */
7528         config->config_table[0].cam_entry.msb_mac_addr =
7529                                         swab16(*(u16 *)&mac[0]);
7530         config->config_table[0].cam_entry.middle_mac_addr =
7531                                         swab16(*(u16 *)&mac[2]);
7532         config->config_table[0].cam_entry.lsb_mac_addr =
7533                                         swab16(*(u16 *)&mac[4]);
7534         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7535         if (set)
7536                 config->config_table[0].target_table_entry.flags = 0;
7537         else
7538                 CAM_INVALIDATE(config->config_table[0]);
7539         config->config_table[0].target_table_entry.clients_bit_vector =
7540                                                 cpu_to_le32(cl_bit_vec);
7541         config->config_table[0].target_table_entry.vlan_id = 0;
7542
7543         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7544            (set ? "setting" : "clearing"),
7545            config->config_table[0].cam_entry.msb_mac_addr,
7546            config->config_table[0].cam_entry.middle_mac_addr,
7547            config->config_table[0].cam_entry.lsb_mac_addr);
7548
7549         /* broadcast */
7550         if (with_bcast) {
7551                 config->config_table[1].cam_entry.msb_mac_addr =
7552                         cpu_to_le16(0xffff);
7553                 config->config_table[1].cam_entry.middle_mac_addr =
7554                         cpu_to_le16(0xffff);
7555                 config->config_table[1].cam_entry.lsb_mac_addr =
7556                         cpu_to_le16(0xffff);
7557                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7558                 if (set)
7559                         config->config_table[1].target_table_entry.flags =
7560                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7561                 else
7562                         CAM_INVALIDATE(config->config_table[1]);
7563                 config->config_table[1].target_table_entry.clients_bit_vector =
7564                                                         cpu_to_le32(cl_bit_vec);
7565                 config->config_table[1].target_table_entry.vlan_id = 0;
7566         }
7567
7568         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7569                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7570                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7571 }
7572
7573 /**
7574  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7575  *
7576  * @param bp driver descriptor
7577  * @param set set or clear an entry (1 or 0)
7578  * @param mac pointer to a buffer containing a MAC
7579  * @param cl_bit_vec bit vector of clients to register a MAC for
7580  * @param cam_offset offset in a CAM to use
7581  */
7582 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7583                                        u32 cl_bit_vec, u8 cam_offset)
7584 {
7585         struct mac_configuration_cmd_e1h *config =
7586                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7587
7588         config->hdr.length = 1;
7589         config->hdr.offset = cam_offset;
7590         config->hdr.client_id = 0xff;
7591         config->hdr.reserved1 = 0;
7592
7593         /* primary MAC */
7594         config->config_table[0].msb_mac_addr =
7595                                         swab16(*(u16 *)&mac[0]);
7596         config->config_table[0].middle_mac_addr =
7597                                         swab16(*(u16 *)&mac[2]);
7598         config->config_table[0].lsb_mac_addr =
7599                                         swab16(*(u16 *)&mac[4]);
7600         config->config_table[0].clients_bit_vector =
7601                                         cpu_to_le32(cl_bit_vec);
7602         config->config_table[0].vlan_id = 0;
7603         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7604         if (set)
7605                 config->config_table[0].flags = BP_PORT(bp);
7606         else
7607                 config->config_table[0].flags =
7608                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7609
7610         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7611            (set ? "setting" : "clearing"),
7612            config->config_table[0].msb_mac_addr,
7613            config->config_table[0].middle_mac_addr,
7614            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7615
7616         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7617                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7618                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7619 }
7620
7621 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7622                              int *state_p, int poll)
7623 {
7624         /* can take a while if any port is running */
7625         int cnt = 5000;
7626
7627         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7628            poll ? "polling" : "waiting", state, idx);
7629
7630         might_sleep();
7631         while (cnt--) {
7632                 if (poll) {
7633                         bnx2x_rx_int(bp->fp, 10);
7634                         /* if index is different from 0
7635                          * the reply for some commands will
7636                          * be on the non-default queue
7637                          */
7638                         if (idx)
7639                                 bnx2x_rx_int(&bp->fp[idx], 10);
7640                 }
7641
7642                 mb(); /* state is changed by bnx2x_sp_event() */
7643                 if (*state_p == state) {
7644 #ifdef BNX2X_STOP_ON_ERROR
7645                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7646 #endif
7647                         return 0;
7648                 }
7649
7650                 msleep(1);
7651
7652                 if (bp->panic)
7653                         return -EIO;
7654         }
7655
7656         /* timeout! */
7657         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7658                   poll ? "polling" : "waiting", state, idx);
7659 #ifdef BNX2X_STOP_ON_ERROR
7660         bnx2x_panic();
7661 #endif
7662
7663         return -EBUSY;
7664 }
7665
7666 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7667 {
7668         bp->set_mac_pending++;
7669         smp_wmb();
7670
7671         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7672                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7673
7674         /* Wait for a completion */
7675         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7676 }
7677
7678 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7679 {
7680         bp->set_mac_pending++;
7681         smp_wmb();
7682
7683         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7684                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7685                                   1);
7686
7687         /* Wait for a completion */
7688         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7689 }
7690
7691 #ifdef BCM_CNIC
7692 /**
7693  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7694  * MAC(s). This function will wait until the ramrod completion
7695  * returns.
7696  *
7697  * @param bp driver handle
7698  * @param set set or clear the CAM entry
7699  *
7700  * @return 0; a ramrod timeout is not propagated to the caller.
7701  */
7702 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7703 {
7704         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7705
7706         bp->set_mac_pending++;
7707         smp_wmb();
7708
7709         /* Send a SET_MAC ramrod */
7710         if (CHIP_IS_E1(bp))
7711                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7712                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7713                                   1);
7714         else
7715                 /* CAM allocation for E1H
7716                 * unicasts: by func number
7717                 * multicast: 20+FUNC*20, 20 each
7718                 */
7719                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7720                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7721
7722         /* Wait for a completion when setting */
7723         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7724
7725         return 0;
7726 }
7727 #endif
7728
7729 static int bnx2x_setup_leading(struct bnx2x *bp)
7730 {
7731         int rc;
7732
7733         /* reset IGU state */
7734         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7735
7736         /* SETUP ramrod */
7737         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7738
7739         /* Wait for completion */
7740         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7741
7742         return rc;
7743 }
7744
7745 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7746 {
7747         struct bnx2x_fastpath *fp = &bp->fp[index];
7748
7749         /* reset IGU state */
7750         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7751
7752         /* SETUP ramrod */
7753         fp->state = BNX2X_FP_STATE_OPENING;
7754         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7755                       fp->cl_id, 0);
7756
7757         /* Wait for completion */
7758         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7759                                  &(fp->state), 0);
7760 }
7761
7762 static int bnx2x_poll(struct napi_struct *napi, int budget);
7763
7764 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7765 {
7766
7767         switch (bp->multi_mode) {
7768         case ETH_RSS_MODE_DISABLED:
7769                 bp->num_queues = 1;
7770                 break;
7771
7772         case ETH_RSS_MODE_REGULAR:
7773                 if (num_queues)
7774                         bp->num_queues = min_t(u32, num_queues,
7775                                                   BNX2X_MAX_QUEUES(bp));
7776                 else
7777                         bp->num_queues = min_t(u32, num_online_cpus(),
7778                                                   BNX2X_MAX_QUEUES(bp));
7779                 break;
7780
7781
7782         default:
7783                 bp->num_queues = 1;
7784                 break;
7785         }
7786 }
7787
7788 static int bnx2x_set_num_queues(struct bnx2x *bp)
7789 {
7790         int rc = 0;
7791
7792         switch (int_mode) {
7793         case INT_MODE_INTx:
7794         case INT_MODE_MSI:
7795                 bp->num_queues = 1;
7796                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7797                 break;
7798
7799         case INT_MODE_MSIX:
7800         default:
7801                 /* Set number of queues according to bp->multi_mode value */
7802                 bnx2x_set_num_queues_msix(bp);
7803
7804                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7805                    bp->num_queues);
7806
7807                 /* if we can't use MSI-X we only need one fp,
7808                  * so try to enable MSI-X with the requested number of fp's
7809                  * and fall back to MSI or legacy INTx with one fp
7810                  */
7811                 rc = bnx2x_enable_msix(bp);
7812                 if (rc)
7813                         /* failed to enable MSI-X */
7814                         bp->num_queues = 1;
7815                 break;
7816         }
7817         bp->dev->real_num_tx_queues = bp->num_queues;
7818         return rc;
7819 }
7820
7821 #ifdef BCM_CNIC
7822 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7823 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7824 #endif
7825
7826 /* must be called with rtnl_lock */
7827 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7828 {
7829         u32 load_code;
7830         int i, rc;
7831
7832 #ifdef BNX2X_STOP_ON_ERROR
7833         if (unlikely(bp->panic))
7834                 return -EPERM;
7835 #endif
7836
7837         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7838
7839         rc = bnx2x_set_num_queues(bp);
7840
7841         if (bnx2x_alloc_mem(bp)) {
7842                 bnx2x_free_irq(bp, true);
7843                 return -ENOMEM;
7844         }
7845
7846         for_each_queue(bp, i)
7847                 bnx2x_fp(bp, i, disable_tpa) =
7848                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7849
7850         for_each_queue(bp, i)
7851                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7852                                bnx2x_poll, 128);
7853
7854         bnx2x_napi_enable(bp);
7855
7856         if (bp->flags & USING_MSIX_FLAG) {
7857                 rc = bnx2x_req_msix_irqs(bp);
7858                 if (rc) {
7859                         bnx2x_free_irq(bp, true);
7860                         goto load_error1;
7861                 }
7862         } else {
7863                 /* Fall back to INTx if we failed to enable MSI-X due to
7864                    lack of memory (in bnx2x_set_num_queues()) */
7865                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7866                         bnx2x_enable_msi(bp);
7867                 bnx2x_ack_int(bp);
7868                 rc = bnx2x_req_irq(bp);
7869                 if (rc) {
7870                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7871                         bnx2x_free_irq(bp, true);
7872                         goto load_error1;
7873                 }
7874                 if (bp->flags & USING_MSI_FLAG) {
7875                         bp->dev->irq = bp->pdev->irq;
7876                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7877                                     bp->pdev->irq);
7878                 }
7879         }
7880
7881         /* Send LOAD_REQUEST command to MCP.
7882            The reply gives the type of LOAD command:
7883            if this is the first port to be initialized,
7884            the common blocks should be initialized as well; otherwise not.
7885         */
7886         if (!BP_NOMCP(bp)) {
7887                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7888                 if (!load_code) {
7889                         BNX2X_ERR("MCP response failure, aborting\n");
7890                         rc = -EBUSY;
7891                         goto load_error2;
7892                 }
7893                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7894                         rc = -EBUSY; /* other port in diagnostic mode */
7895                         goto load_error2;
7896                 }
7897
7898         } else {
7899                 int port = BP_PORT(bp);
7900
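                     /* no MCP to arbitrate the load: emulate its
                      * decision with driver-global counters, where
                      * load_count[0] counts all functions and
                      * load_count[1 + port] counts this port's
                      */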
7901                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7902                    load_count[0], load_count[1], load_count[2]);
7903                 load_count[0]++;
7904                 load_count[1 + port]++;
7905                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7906                    load_count[0], load_count[1], load_count[2]);
7907                 if (load_count[0] == 1)
7908                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7909                 else if (load_count[1 + port] == 1)
7910                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7911                 else
7912                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7913         }
7914
7915         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7916             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7917                 bp->port.pmf = 1;
7918         else
7919                 bp->port.pmf = 0;
7920         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7921
7922         /* Initialize HW */
7923         rc = bnx2x_init_hw(bp, load_code);
7924         if (rc) {
7925                 BNX2X_ERR("HW init failed, aborting\n");
7926                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7927                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7928                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7929                 goto load_error2;
7930         }
7931
7932         /* Setup NIC internals and enable interrupts */
7933         bnx2x_nic_init(bp, load_code);
7934
7935         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7936             (bp->common.shmem2_base))
7937                 SHMEM2_WR(bp, dcc_support,
7938                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7939                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7940
7941         /* Send LOAD_DONE command to MCP */
7942         if (!BP_NOMCP(bp)) {
7943                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7944                 if (!load_code) {
7945                         BNX2X_ERR("MCP response failure, aborting\n");
7946                         rc = -EBUSY;
7947                         goto load_error3;
7948                 }
7949         }
7950
7951         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7952
7953         rc = bnx2x_setup_leading(bp);
7954         if (rc) {
7955                 BNX2X_ERR("Setup leading failed!\n");
7956 #ifndef BNX2X_STOP_ON_ERROR
7957                 goto load_error3;
7958 #else
7959                 bp->panic = 1;
7960                 return -EBUSY;
7961 #endif
7962         }
7963
7964         if (CHIP_IS_E1H(bp))
7965                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7966                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7967                         bp->flags |= MF_FUNC_DIS;
7968                 }
7969
7970         if (bp->state == BNX2X_STATE_OPEN) {
7971 #ifdef BCM_CNIC
7972                 /* Enable Timer scan */
7973                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7974 #endif
7975                 for_each_nondefault_queue(bp, i) {
7976                         rc = bnx2x_setup_multi(bp, i);
7977                         if (rc)
7978 #ifdef BCM_CNIC
7979                                 goto load_error4;
7980 #else
7981                                 goto load_error3;
7982 #endif
7983                 }
7984
7985                 if (CHIP_IS_E1(bp))
7986                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7987                 else
7988                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7989 #ifdef BCM_CNIC
7990                 /* Set iSCSI L2 MAC */
7991                 mutex_lock(&bp->cnic_mutex);
7992                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7993                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7994                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7995                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7996                                       CNIC_SB_ID(bp));
7997                 }
7998                 mutex_unlock(&bp->cnic_mutex);
7999 #endif
8000         }
8001
8002         if (bp->port.pmf)
8003                 bnx2x_initial_phy_init(bp, load_mode);
8004
8005         /* Start fast path */
8006         switch (load_mode) {
8007         case LOAD_NORMAL:
8008                 if (bp->state == BNX2X_STATE_OPEN) {
8009                         /* Tx queues should only be re-enabled */
8010                         netif_tx_wake_all_queues(bp->dev);
8011                 }
8012                 /* Initialize the receive filter. */
8013                 bnx2x_set_rx_mode(bp->dev);
8014                 break;
8015
8016         case LOAD_OPEN:
8017                 netif_tx_start_all_queues(bp->dev);
8018                 if (bp->state != BNX2X_STATE_OPEN)
8019                         netif_tx_disable(bp->dev);
8020                 /* Initialize the receive filter. */
8021                 bnx2x_set_rx_mode(bp->dev);
8022                 break;
8023
8024         case LOAD_DIAG:
8025                 /* Initialize the receive filter. */
8026                 bnx2x_set_rx_mode(bp->dev);
8027                 bp->state = BNX2X_STATE_DIAG;
8028                 break;
8029
8030         default:
8031                 break;
8032         }
8033
8034         if (!bp->port.pmf)
8035                 bnx2x__link_status_update(bp);
8036
8037         /* start the timer */
8038         mod_timer(&bp->timer, jiffies + bp->current_interval);
8039
8040 #ifdef BCM_CNIC
8041         bnx2x_setup_cnic_irq_info(bp);
8042         if (bp->state == BNX2X_STATE_OPEN)
8043                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8044 #endif
8045         bnx2x_inc_load_cnt(bp);
8046
8047         return 0;
8048
8049 #ifdef BCM_CNIC
8050 load_error4:
8051         /* Disable Timer scan */
8052         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8053 #endif
8054 load_error3:
8055         bnx2x_int_disable_sync(bp, 1);
8056         if (!BP_NOMCP(bp)) {
8057                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8058                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8059         }
8060         bp->port.pmf = 0;
8061         /* Free SKBs, SGEs, TPA pool and driver internals */
8062         bnx2x_free_skbs(bp);
8063         for_each_queue(bp, i)
8064                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8065 load_error2:
8066         /* Release IRQs */
8067         bnx2x_free_irq(bp, false);
8068 load_error1:
8069         bnx2x_napi_disable(bp);
8070         for_each_queue(bp, i)
8071                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8072         bnx2x_free_mem(bp);
8073
8074         return rc;
8075 }
8076
8077 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8078 {
8079         struct bnx2x_fastpath *fp = &bp->fp[index];
8080         int rc;
8081
8082         /* halt the connection */
8083         fp->state = BNX2X_FP_STATE_HALTING;
8084         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8085
8086         /* Wait for completion */
8087         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8088                                &(fp->state), 1);
8089         if (rc) /* timeout */
8090                 return rc;
8091
8092         /* delete cfc entry */
8093         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8094
8095         /* Wait for completion */
8096         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8097                                &(fp->state), 1);
8098         return rc;
8099 }
8100
8101 static int bnx2x_stop_leading(struct bnx2x *bp)
8102 {
8103         __le16 dsb_sp_prod_idx;
8104         /* if the other port is handling traffic,
8105            this can take a lot of time */
8106         int cnt = 500;
8107         int rc;
8108
8109         might_sleep();
8110
8111         /* Send HALT ramrod */
8112         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8113         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8114
8115         /* Wait for completion */
8116         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8117                                &(bp->fp[0].state), 1);
8118         if (rc) /* timeout */
8119                 return rc;
8120
8121         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8122
8123         /* Send PORT_DELETE ramrod */
8124         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8125
8126         /* Wait for completion to arrive on default status block
8127            we are going to reset the chip anyway
8128            so there is not much to do if this times out
8129          */
8130         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8131                 if (!cnt) {
8132                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8133                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8134                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
8135 #ifdef BNX2X_STOP_ON_ERROR
8136                         bnx2x_panic();
8137 #endif
8138                         rc = -EBUSY;
8139                         break;
8140                 }
8141                 cnt--;
8142                 msleep(1);
8143                 rmb(); /* Refresh the dsb_sp_prod */
8144         }
8145         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8146         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8147
8148         return rc;
8149 }
8150
8151 static void bnx2x_reset_func(struct bnx2x *bp)
8152 {
8153         int port = BP_PORT(bp);
8154         int func = BP_FUNC(bp);
8155         int base, i;
8156
8157         /* Configure IGU */
8158         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8159         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8160
8161 #ifdef BCM_CNIC
8162         /* Disable Timer scan */
8163         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8164         /*
8165          * Wait for at least 10ms and up to 2 seconds for the timers scan to
8166          * complete
8167          */
8168         for (i = 0; i < 200; i++) {
8169                 msleep(10);
8170                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8171                         break;
8172         }
8173 #endif
8174         /* Clear ILT */
8175         base = FUNC_ILT_BASE(func);
8176         for (i = base; i < base + ILT_PER_FUNC; i++)
8177                 bnx2x_ilt_wr(bp, i, 0);
8178 }
8179
8180 static void bnx2x_reset_port(struct bnx2x *bp)
8181 {
8182         int port = BP_PORT(bp);
8183         u32 val;
8184
8185         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8186
8187         /* Do not rcv packets to BRB */
8188         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8189         /* Do not direct rcv packets that are not for MCP to the BRB */
8190         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8191                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8192
8193         /* Configure AEU */
8194         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8195
8196         msleep(100);
8197         /* Check for BRB port occupancy */
8198         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8199         if (val)
8200                 DP(NETIF_MSG_IFDOWN,
8201                    "BRB1 is not empty  %d blocks are occupied\n", val);
8202
8203         /* TODO: Close Doorbell port? */
8204 }
8205
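     /* The reset scope comes from the MCP's answer to the unload request:
      * COMMON (last driver instance - reset port, function and common
      * blocks), PORT (last function on this port) or FUNCTION (this
      * function only).
      */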
8206 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8207 {
8208         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8209            BP_FUNC(bp), reset_code);
8210
8211         switch (reset_code) {
8212         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8213                 bnx2x_reset_port(bp);
8214                 bnx2x_reset_func(bp);
8215                 bnx2x_reset_common(bp);
8216                 break;
8217
8218         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8219                 bnx2x_reset_port(bp);
8220                 bnx2x_reset_func(bp);
8221                 break;
8222
8223         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8224                 bnx2x_reset_func(bp);
8225                 break;
8226
8227         default:
8228                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8229                 break;
8230         }
8231 }
8232
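     /* Quiesce and unload: drain Tx, clear MAC/multicast filtering (CAM
      * entries on E1, MC hash on E1H), optionally arm WoL in the EMAC,
      * close all connections, then negotiate the reset scope with the
      * MCP - or derive it from load_count[] when no MCP is present.
      */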
8233 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8234 {
8235         int port = BP_PORT(bp);
8236         u32 reset_code = 0;
8237         int i, cnt, rc;
8238
8239         /* Wait until tx fastpath tasks complete */
8240         for_each_queue(bp, i) {
8241                 struct bnx2x_fastpath *fp = &bp->fp[i];
8242
8243                 cnt = 1000;
8244                 while (bnx2x_has_tx_work_unload(fp)) {
8245
8246                         bnx2x_tx_int(fp);
8247                         if (!cnt) {
8248                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
8249                                           i);
8250 #ifdef BNX2X_STOP_ON_ERROR
8251                                 bnx2x_panic();
8252                                 return;
8253 #else
8254                                 break;
8255 #endif
8256                         }
8257                         cnt--;
8258                         msleep(1);
8259                 }
8260         }
8261         /* Give HW time to discard old tx messages */
8262         msleep(1);
8263
8264         if (CHIP_IS_E1(bp)) {
8265                 struct mac_configuration_cmd *config =
8266                                                 bnx2x_sp(bp, mcast_config);
8267
8268                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8269
8270                 for (i = 0; i < config->hdr.length; i++)
8271                         CAM_INVALIDATE(config->config_table[i]);
8272
8273                 config->hdr.length = i;
8274                 if (CHIP_REV_IS_SLOW(bp))
8275                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8276                 else
8277                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8278                 config->hdr.client_id = bp->fp->cl_id;
8279                 config->hdr.reserved1 = 0;
8280
8281                 bp->set_mac_pending++;
8282                 smp_wmb();
8283
8284                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8285                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8286                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8287
8288         } else { /* E1H */
8289                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8290
8291                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8292
8293                 for (i = 0; i < MC_HASH_SIZE; i++)
8294                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8295
8296                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8297         }
8298 #ifdef BCM_CNIC
8299         /* Clear iSCSI L2 MAC */
8300         mutex_lock(&bp->cnic_mutex);
8301         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8302                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8303                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8304         }
8305         mutex_unlock(&bp->cnic_mutex);
8306 #endif
8307
8308         if (unload_mode == UNLOAD_NORMAL)
8309                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8310
8311         else if (bp->flags & NO_WOL_FLAG)
8312                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8313
8314         else if (bp->wol) {
8315                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8316                 u8 *mac_addr = bp->dev->dev_addr;
8317                 u32 val;
8318                 /* The MAC address is written to entries 1-4 to
8319                    preserve entry 0, which is used by the PMF */
8320                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8321
8322                 val = (mac_addr[0] << 8) | mac_addr[1];
8323                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8324
8325                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8326                       (mac_addr[4] << 8) | mac_addr[5];
8327                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8328
8329                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8330
8331         } else
8332                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8333
8334         /* Close multi and leading connections.
8335            Completions for ramrods are collected in a synchronous way */
8336         for_each_nondefault_queue(bp, i)
8337                 if (bnx2x_stop_multi(bp, i))
8338                         goto unload_error;
8339
8340         rc = bnx2x_stop_leading(bp);
8341         if (rc) {
8342                 BNX2X_ERR("Stop leading failed!\n");
8343 #ifdef BNX2X_STOP_ON_ERROR
8344                 return;
8345 #else
8346                 goto unload_error;
8347 #endif
8348         }
8349
8350 unload_error:
8351         if (!BP_NOMCP(bp))
8352                 reset_code = bnx2x_fw_command(bp, reset_code);
8353         else {
8354                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8355                    load_count[0], load_count[1], load_count[2]);
8356                 load_count[0]--;
8357                 load_count[1 + port]--;
8358                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8359                    load_count[0], load_count[1], load_count[2]);
8360                 if (load_count[0] == 0)
8361                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8362                 else if (load_count[1 + port] == 0)
8363                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8364                 else
8365                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8366         }
8367
8368         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8369             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8370                 bnx2x__link_reset(bp);
8371
8372         /* Reset the chip */
8373         bnx2x_reset_chip(bp, reset_code);
8374
8375         /* Report UNLOAD_DONE to MCP */
8376         if (!BP_NOMCP(bp))
8377                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8378
8379 }
8380
8381 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8382 {
8383         u32 val;
8384
8385         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8386
8387         if (CHIP_IS_E1(bp)) {
8388                 int port = BP_PORT(bp);
8389                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8390                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8391
8392                 val = REG_RD(bp, addr);
8393                 val &= ~(0x300);
8394                 REG_WR(bp, addr, val);
8395         } else if (CHIP_IS_E1H(bp)) {
8396                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8397                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8398                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8399                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8400         }
8401 }
8402
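     /* Unload ordering matters: switch Rx to "drop all" first, then stop
      * interrupts/NAPI/Tx, then run the chip-level cleanup, and only then
      * free SKBs and driver memory, so no path can touch freed resources.
      */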
8403 /* must be called with rtnl_lock */
8404 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8405 {
8406         int i;
8407
8408         if (bp->state == BNX2X_STATE_CLOSED) {
8409                 /* Interface has been removed - nothing to recover */
8410                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8411                 bp->is_leader = 0;
8412                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8413                 smp_wmb();
8414
8415                 return -EINVAL;
8416         }
8417
8418 #ifdef BCM_CNIC
8419         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8420 #endif
8421         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8422
8423         /* Set "drop all" */
8424         bp->rx_mode = BNX2X_RX_MODE_NONE;
8425         bnx2x_set_storm_rx_mode(bp);
8426
8427         /* Disable HW interrupts, NAPI and Tx */
8428         bnx2x_netif_stop(bp, 1);
8429
8430         del_timer_sync(&bp->timer);
8431         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8432                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8433         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8434
8435         /* Release IRQs */
8436         bnx2x_free_irq(bp, false);
8437
8438         /* Cleanup the chip if needed */
8439         if (unload_mode != UNLOAD_RECOVERY)
8440                 bnx2x_chip_cleanup(bp, unload_mode);
8441
8442         bp->port.pmf = 0;
8443
8444         /* Free SKBs, SGEs, TPA pool and driver internals */
8445         bnx2x_free_skbs(bp);
8446         for_each_queue(bp, i)
8447                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8448         for_each_queue(bp, i)
8449                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8450         bnx2x_free_mem(bp);
8451
8452         bp->state = BNX2X_STATE_CLOSED;
8453
8454         netif_carrier_off(bp->dev);
8455
8456         /* The last driver must disable the "close the gate" functionality
8457          * if there is no parity attention or "process kill" pending.
8458          */
8459         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8460             bnx2x_reset_is_done(bp))
8461                 bnx2x_disable_close_the_gate(bp);
8462
8463         /* Reset the MCP mailbox sequence if recovery is ongoing */
8464         if (unload_mode == UNLOAD_RECOVERY)
8465                 bp->fw_seq = 0;
8466
8467         return 0;
8468 }
8469
8470 /* Close gates #2, #3 and #4: */
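     /* The "gates" are host-access paths blocked for the duration of a
      * process-kill reset: PXP doorbells (#4) and internal writes (#2),
      * plus the HC interrupt path (#3).  Note that gate #3 has inverted
      * polarity below: its config bit is set to *open* the gate.
      */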
8471 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8472 {
8473         u32 val, addr;
8474
8475         /* Gates #2 and #4a are closed/opened for "not E1" only */
8476         if (!CHIP_IS_E1(bp)) {
8477                 /* #4 */
8478                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8479                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8480                        close ? (val | 0x1) : (val & (~(u32)1)));
8481                 /* #2 */
8482                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8483                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8484                        close ? (val | 0x1) : (val & (~(u32)1)));
8485         }
8486
8487         /* #3 */
8488         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8489         val = REG_RD(bp, addr);
8490         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8491
8492         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8493                 close ? "closing" : "opening");
8494         mmiowb();
8495 }
8496
8497 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8498
8499 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8500 {
8501         /* Do some magic... */
8502         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8503         *magic_val = val & SHARED_MF_CLP_MAGIC;
8504         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8505 }
8506
8507 /* Restore the value of the `magic' bit.
8508  *
8509  * @param bp Driver handle.
8510  * @param magic_val Old value of the `magic' bit.
8511  */
8512 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8513 {
8514         /* Restore the `magic' bit value... */
8518         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8519         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8520                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8521 }
8522
8523 /* Prepares for MCP reset: takes care of CLP configurations.
8524  *
8525  * @param bp Driver handle.
8526  * @param magic_val Old value of 'magic' bit.
8527  */
8528 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8529 {
8530         u32 shmem;
8531         u32 validity_offset;
8532
8533         DP(NETIF_MSG_HW, "Starting\n");
8534
8535         /* Set `magic' bit in order to save MF config */
8536         if (!CHIP_IS_E1(bp))
8537                 bnx2x_clp_reset_prep(bp, magic_val);
8538
8539         /* Get shmem offset */
8540         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8541         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8542
8543         /* Clear validity map flags */
8544         if (shmem > 0)
8545                 REG_WR(bp, shmem + validity_offset, 0);
8546 }
8547
8548 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8549 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
8550
8551 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8552  * depending on the HW type.
8553  *
8554  * @param bp Driver handle.
8555  */
8556 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8557 {
8558         /* special handling for emulation and FPGA,
8559            wait 10 times longer */
8560         if (CHIP_REV_IS_SLOW(bp))
8561                 msleep(MCP_ONE_TIMEOUT*10);
8562         else
8563                 msleep(MCP_ONE_TIMEOUT);
8564 }
8565
8566 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8567 {
8568         u32 shmem, cnt, validity_offset, val;
8569         int rc = 0;
8570
8571         msleep(100);
8572
8573         /* Get shmem offset */
8574         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8575         if (shmem == 0) {
8576                 BNX2X_ERR("Shmem 0 return failure\n");
8577                 rc = -ENOTTY;
8578                 goto exit_lbl;
8579         }
8580
8581         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8582
8583         /* Wait for MCP to come up */
8584         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8585                 /* TBD: it's best to check the validity map of the last
8586                  * port; currently it checks port 0.
8587                  */
8588                 val = REG_RD(bp, shmem + validity_offset);
8589                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8590                    shmem + validity_offset, val);
8591
8592                 /* check that shared memory is valid. */
8593                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8594                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8595                         break;
8596
8597                 bnx2x_mcp_wait_one(bp);
8598         }
8599
8600         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8601
8602         /* Check that shared memory is valid. This indicates that MCP is up. */
8603         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8604             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8605                 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8606                 rc = -ENOTTY;
8607                 goto exit_lbl;
8608         }
8609
8610 exit_lbl:
8611         /* Restore the `magic' bit value */
8612         if (!CHIP_IS_E1(bp))
8613                 bnx2x_clp_reset_done(bp, magic_val);
8614
8615         return rc;
8616 }
8617
8618 static void bnx2x_pxp_prep(struct bnx2x *bp)
8619 {
8620         if (!CHIP_IS_E1(bp)) {
8621                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8622                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8623                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8624                 mmiowb();
8625         }
8626 }
8627
8628 /*
8629  * Reset the whole chip except for:
8630  *      - PCIE core
8631  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8632  *              one reset bit)
8633  *      - IGU
8634  *      - MISC (including AEU)
8635  *      - GRC
8636  *      - RBCN, RBCP
8637  */
8638 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8639 {
8640         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8641
8642         not_reset_mask1 =
8643                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8644                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8645                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8646
8647         not_reset_mask2 =
8648                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8649                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8650                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8651                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8652                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8653                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8654                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8655                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8656
8657         reset_mask1 = 0xffffffff;
8658
8659         if (CHIP_IS_E1(bp))
8660                 reset_mask2 = 0xffff;
8661         else
8662                 reset_mask2 = 0x1ffff;
8663
8664         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8665                reset_mask1 & (~not_reset_mask1));
8666         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8667                reset_mask2 & (~not_reset_mask2));
8668
8669         barrier();
8670         mmiowb();
8671
8672         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8673         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8674         mmiowb();
8675 }
8676
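     /* "Process kill" - the big hammer of parity recovery: wait for the
      * PXP Tetris buffer to drain, close gates #2-#4, reset nearly the
      * whole chip (see bnx2x_process_kill_chip_reset() above), then wait
      * for the MCP to come back up and reopen the gates.
      */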
8677 static int bnx2x_process_kill(struct bnx2x *bp)
8678 {
8679         int cnt = 1000;
8680         u32 val = 0;
8681         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8682
8684         /* Empty the Tetris buffer, wait for 1s */
8685         do {
8686                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8687                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8688                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8689                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8690                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8691                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8692                     ((port_is_idle_0 & 0x1) == 0x1) &&
8693                     ((port_is_idle_1 & 0x1) == 0x1) &&
8694                     (pgl_exp_rom2 == 0xffffffff))
8695                         break;
8696                 msleep(1);
8697         } while (cnt-- > 0);
8698
8699         if (cnt <= 0) {
8700                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8701                           " are still"
8702                           " outstanding read requests after 1s!\n");
8703                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8704                           " port_is_idle_0=0x%08x,"
8705                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8706                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8707                           pgl_exp_rom2);
8708                 return -EAGAIN;
8709         }
8710
8711         barrier();
8712
8713         /* Close gates #2, #3 and #4 */
8714         bnx2x_set_234_gates(bp, true);
8715
8716         /* TBD: Indicate that "process kill" is in progress to MCP */
8717
8718         /* Clear "unprepared" bit */
8719         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8720         barrier();
8721
8722         /* Make sure all is written to the chip before the reset */
8723         mmiowb();
8724
8725         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8726          * PSWHST, GRC and PSWRD Tetris buffer.
8727          */
8728         msleep(1);
8729
8730         /* Prepare for chip reset: */
8731         /* MCP */
8732         bnx2x_reset_mcp_prep(bp, &val);
8733
8734         /* PXP */
8735         bnx2x_pxp_prep(bp);
8736         barrier();
8737
8738         /* reset the chip */
8739         bnx2x_process_kill_chip_reset(bp);
8740         barrier();
8741
8742         /* Recover after reset: */
8743         /* MCP */
8744         if (bnx2x_reset_mcp_comp(bp, val))
8745                 return -EAGAIN;
8746
8747         /* PXP */
8748         bnx2x_pxp_prep(bp);
8749
8750         /* Open the gates #2, #3 and #4 */
8751         bnx2x_set_234_gates(bp, false);
8752
8753         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8754          * reset state, re-enable attentions. */
8755
8756         return 0;
8757 }
8758
8759 static int bnx2x_leader_reset(struct bnx2x *bp)
8760 {
8761         int rc = 0;
8762         /* Try to recover after the failure */
8763         if (bnx2x_process_kill(bp)) {
8764                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
8765                        bp->dev->name);
8766                 rc = -EAGAIN;
8767                 goto exit_leader_reset;
8768         }
8769
8770         /* Clear "reset is in progress" bit and update the driver state */
8771         bnx2x_set_reset_done(bp);
8772         bp->recovery_state = BNX2X_RECOVERY_DONE;
8773
8774 exit_leader_reset:
8775         bp->is_leader = 0;
8776         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8777         smp_wmb();
8778         return rc;
8779 }
8780
8781 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8782
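     /* Parity recovery is a small distributed state machine: the first
      * function to grab the LEADER_LOCK (HW_LOCK_RESOURCE_RESERVED_08)
      * becomes the leader and runs the process-kill once all other
      * functions have unloaded; non-leaders unload, wait for
      * bnx2x_reset_is_done() and then reload.
      */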
8783 /* Assumption: runs under rtnl lock. This together with the fact
8784  * that it's called only from bnx2x_reset_task() ensure that it
8785  * will never be called when netif_running(bp->dev) is false.
8786  */
8787 static void bnx2x_parity_recover(struct bnx2x *bp)
8788 {
8789         DP(NETIF_MSG_HW, "Handling parity\n");
8790         while (1) {
8791                 switch (bp->recovery_state) {
8792                 case BNX2X_RECOVERY_INIT:
8793                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8794                         /* Try to get a LEADER_LOCK HW lock */
8795                         if (bnx2x_trylock_hw_lock(bp,
8796                                 HW_LOCK_RESOURCE_RESERVED_08))
8797                                 bp->is_leader = 1;
8798
8799                         /* Stop the driver */
8800                         /* If the interface has been removed - return */
8801                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8802                                 return;
8803
8804                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8805                         /* Ensure "is_leader" and "recovery_state"
8806                          *  update values are seen on other CPUs
8807                          */
8808                         smp_wmb();
8809                         break;
8810
8811                 case BNX2X_RECOVERY_WAIT:
8812                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8813                         if (bp->is_leader) {
8814                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8815                                 if (load_counter) {
8816                                         /* Wait until all other functions
8817                                          * are down.
8818                                          */
8819                                         schedule_delayed_work(&bp->reset_task,
8820                                                                 HZ/10);
8821                                         return;
8822                                 } else {
8823                                         /* If all other functions are down -
8824                                          * try to bring the chip back to
8825                                          * normal. In any case it's an exit
8826                                          * point for a leader.
8827                                          */
8828                                         if (bnx2x_leader_reset(bp) ||
8829                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
8830                                                 printk(KERN_ERR"%s: Recovery "
8831                                                 "has failed. Power cycle is "
8832                                                 "needed.\n", bp->dev->name);
8833                                                 /* Disconnect this device */
8834                                                 netif_device_detach(bp->dev);
8835                                                 /* Block ifup for all function
8836                                                  * of this ASIC until
8837                                                  * "process kill" or power
8838                                                  * cycle.
8839                                                  */
8840                                                 bnx2x_set_reset_in_progress(bp);
8841                                                 /* Shut down the power */
8842                                                 bnx2x_set_power_state(bp,
8843                                                                 PCI_D3hot);
8844                                                 return;
8845                                         }
8846
8847                                         return;
8848                                 }
8849                         } else { /* non-leader */
8850                                 if (!bnx2x_reset_is_done(bp)) {
8851                                         /* Try to get a LEADER_LOCK HW lock,
8852                                          * since the former leader may have
8853                                          * been unloaded by the user or may
8854                                          * have released leadership for
8855                                          * another reason.
8856                                          */
8857                                         if (bnx2x_trylock_hw_lock(bp,
8858                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8859                                                 /* I'm a leader now! Restart a
8860                                                  * switch case.
8861                                                  */
8862                                                 bp->is_leader = 1;
8863                                                 break;
8864                                         }
8865
8866                                         schedule_delayed_work(&bp->reset_task,
8867                                                                 HZ/10);
8868                                         return;
8869
8870                                 } else { /* A leader has completed
8871                                           * the "process kill". It's an exit
8872                                           * point for a non-leader.
8873                                           */
8874                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8875                                         bp->recovery_state =
8876                                                 BNX2X_RECOVERY_DONE;
8877                                         smp_wmb();
8878                                         return;
8879                                 }
8880                         }
8881                 default:
8882                         return;
8883                 }
8884         }
8885 }
8886
8887 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8888  * scheduled on a generic workqueue in order to prevent a deadlock.
8889  */
8890 static void bnx2x_reset_task(struct work_struct *work)
8891 {
8892         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8893
8894 #ifdef BNX2X_STOP_ON_ERROR
8895         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8896                   " so reset not done to allow debug dump,\n"
8897          KERN_ERR " you will need to reboot when done\n");
8898         return;
8899 #endif
8900
8901         rtnl_lock();
8902
8903         if (!netif_running(bp->dev))
8904                 goto reset_task_exit;
8905
8906         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8907                 bnx2x_parity_recover(bp);
8908         else {
8909                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8910                 bnx2x_nic_load(bp, LOAD_NORMAL);
8911         }
8912
8913 reset_task_exit:
8914         rtnl_unlock();
8915 }
8916
8917 /* end of nic load/unload */
8918
8919 /* ethtool_ops */
8920
8921 /*
8922  * Init service functions
8923  */
8924
8925 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8926 {
8927         switch (func) {
8928         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8929         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8930         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8931         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8932         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8933         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8934         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8935         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8936         default:
8937                 BNX2X_ERR("Unsupported function index: %d\n", func);
8938                 return (u32)(-1);
8939         }
8940 }
8941
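     /* The PGL "pretend" register makes subsequent GRC accesses appear to
      * come from a different PCI function.  Here it is used to disable
      * interrupts as if we were function 0 ("like-E1" mode); the original
      * function is then restored and the write-back verified.
      */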
8942 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8943 {
8944         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8945
8946         /* Flush all outstanding writes */
8947         mmiowb();
8948
8949         /* Pretend to be function 0 */
8950         REG_WR(bp, reg, 0);
8951         /* Flush the GRC transaction (in the chip) */
8952         new_val = REG_RD(bp, reg);
8953         if (new_val != 0) {
8954                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8955                           new_val);
8956                 BUG();
8957         }
8958
8959         /* From now on we are in the "like-E1" mode */
8960         bnx2x_int_disable(bp);
8961
8962         /* Flush all outstanding writes */
8963         mmiowb();
8964
8965         /* Restore the original function settings */
8966         REG_WR(bp, reg, orig_func);
8967         new_val = REG_RD(bp, reg);
8968         if (new_val != orig_func) {
8969                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8970                           orig_func, new_val);
8971                 BUG();
8972         }
8973 }
8974
8975 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8976 {
8977         if (CHIP_IS_E1H(bp))
8978                 bnx2x_undi_int_disable_e1h(bp, func);
8979         else
8980                 bnx2x_int_disable(bp);
8981 }
8982
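     /* A pre-boot UNDI (PXE) driver may have left the device live.  It is
      * detected via MISC_REG_UNPREPARED plus its tell-tale CID offset of
      * 0x7 in DORQ_REG_NORM_CID_OFST, unloaded port by port through the
      * MCP, and the chip is then reset with the NIG port-swap strapping
      * preserved across the reset.
      */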
8983 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8984 {
8985         u32 val;
8986
8987         /* Check if there is any driver already loaded */
8988         val = REG_RD(bp, MISC_REG_UNPREPARED);
8989         if (val == 0x1) {
8990                 /* Check if it is the UNDI driver
8991                  * UNDI driver initializes CID offset for normal bell to 0x7
8992                  */
8993                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8994                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8995                 if (val == 0x7) {
8996                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8997                         /* save our func */
8998                         int func = BP_FUNC(bp);
8999                         u32 swap_en;
9000                         u32 swap_val;
9001
9002                         /* clear the UNDI indication */
9003                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9004
9005                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9006
9007                         /* try unload UNDI on port 0 */
9008                         bp->func = 0;
9009                         bp->fw_seq =
9010                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9011                                 DRV_MSG_SEQ_NUMBER_MASK);
9012                         reset_code = bnx2x_fw_command(bp, reset_code);
9013
9014                         /* if UNDI is loaded on the other port */
9015                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9016
9017                                 /* send "DONE" for previous unload */
9018                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9019
9020                                 /* unload UNDI on port 1 */
9021                                 bp->func = 1;
9022                                 bp->fw_seq =
9023                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9024                                         DRV_MSG_SEQ_NUMBER_MASK);
9025                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9026
9027                                 bnx2x_fw_command(bp, reset_code);
9028                         }
9029
9030                         /* now it's safe to release the lock */
9031                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9032
9033                         bnx2x_undi_int_disable(bp, func);
9034
9035                         /* close input traffic and wait for it */
9036                         /* Do not rcv packets to BRB */
9037                         REG_WR(bp,
9038                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9039                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9040                         /* Do not direct rcv packets that are not for MCP to
9041                          * the BRB */
9042                         REG_WR(bp,
9043                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9044                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9045                         /* clear AEU */
9046                         REG_WR(bp,
9047                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9048                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9049                         msleep(10);
9050
9051                         /* save NIG port swap info */
9052                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9053                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9054                         /* reset device */
9055                         REG_WR(bp,
9056                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9057                                0xd3ffffff);
9058                         REG_WR(bp,
9059                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9060                                0x1403);
9061                         /* take the NIG out of reset and restore swap values */
9062                         REG_WR(bp,
9063                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9064                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9065                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9066                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9067
9068                         /* send unload done to the MCP */
9069                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9070
9071                         /* restore our func and fw_seq */
9072                         bp->func = func;
9073                         bp->fw_seq =
9074                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9075                                 DRV_MSG_SEQ_NUMBER_MASK);
9076
9077                 } else
9078                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9079         }
9080 }
9081
9082 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9083 {
9084         u32 val, val2, val3, val4, id;
9085         u16 pmc;
9086
9087         /* Get the chip revision id and number. */
9088         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9089         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9090         id = ((val & 0xffff) << 16);
9091         val = REG_RD(bp, MISC_REG_CHIP_REV);
9092         id |= ((val & 0xf) << 12);
9093         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9094         id |= ((val & 0xff) << 4);
9095         val = REG_RD(bp, MISC_REG_BOND_ID);
9096         id |= (val & 0xf);
9097         bp->common.chip_id = id;
9098         bp->link_params.chip_id = bp->common.chip_id;
9099         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9100
9101         val = (REG_RD(bp, 0x2874) & 0x55);
9102         if ((bp->common.chip_id & 0x1) ||
9103             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9104                 bp->flags |= ONE_PORT_FLAG;
9105                 BNX2X_DEV_INFO("single port device\n");
9106         }
9107
9108         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9109         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9110                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9111         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9112                        bp->common.flash_size, bp->common.flash_size);
9113
9114         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9115         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9116         bp->link_params.shmem_base = bp->common.shmem_base;
9117         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9118                        bp->common.shmem_base, bp->common.shmem2_base);
9119
9120         if (!bp->common.shmem_base ||
9121             (bp->common.shmem_base < 0xA0000) ||
9122             (bp->common.shmem_base >= 0xC0000)) {
9123                 BNX2X_DEV_INFO("MCP not active\n");
9124                 bp->flags |= NO_MCP_FLAG;
9125                 return;
9126         }
9127
9128         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9129         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9130                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9131                 BNX2X_ERR("BAD MCP validity signature\n");
9132
9133         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9134         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9135
9136         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9137                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9138                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9139
9140         bp->link_params.feature_config_flags = 0;
9141         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9142         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9143                 bp->link_params.feature_config_flags |=
9144                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9145         else
9146                 bp->link_params.feature_config_flags &=
9147                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9148
9149         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9150         bp->common.bc_ver = val;
9151         BNX2X_DEV_INFO("bc_ver %X\n", val);
9152         if (val < BNX2X_BC_VER) {
9153                 /* for now only warn;
9154                  * later we might need to enforce this */
9155                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
9156                           " please upgrade BC\n", BNX2X_BC_VER, val);
9157         }
9158         bp->link_params.feature_config_flags |=
9159                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9160                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9161
9162         if (BP_E1HVN(bp) == 0) {
9163                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9164                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9165         } else {
9166                 /* no WOL capability for E1HVN != 0 */
9167                 bp->flags |= NO_WOL_FLAG;
9168         }
9169         BNX2X_DEV_INFO("%sWoL capable\n",
9170                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9171
9172         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9173         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9174         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9175         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9176
9177         pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
9178 }
9179
9180 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9181                                                     u32 switch_cfg)
9182 {
9183         int port = BP_PORT(bp);
9184         u32 ext_phy_type;
9185
9186         switch (switch_cfg) {
9187         case SWITCH_CFG_1G:
9188                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9189
9190                 ext_phy_type =
9191                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9192                 switch (ext_phy_type) {
9193                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9194                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9195                                        ext_phy_type);
9196
9197                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9198                                                SUPPORTED_10baseT_Full |
9199                                                SUPPORTED_100baseT_Half |
9200                                                SUPPORTED_100baseT_Full |
9201                                                SUPPORTED_1000baseT_Full |
9202                                                SUPPORTED_2500baseX_Full |
9203                                                SUPPORTED_TP |
9204                                                SUPPORTED_FIBRE |
9205                                                SUPPORTED_Autoneg |
9206                                                SUPPORTED_Pause |
9207                                                SUPPORTED_Asym_Pause);
9208                         break;
9209
9210                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9211                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9212                                        ext_phy_type);
9213
9214                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9215                                                SUPPORTED_10baseT_Full |
9216                                                SUPPORTED_100baseT_Half |
9217                                                SUPPORTED_100baseT_Full |
9218                                                SUPPORTED_1000baseT_Full |
9219                                                SUPPORTED_TP |
9220                                                SUPPORTED_FIBRE |
9221                                                SUPPORTED_Autoneg |
9222                                                SUPPORTED_Pause |
9223                                                SUPPORTED_Asym_Pause);
9224                         break;
9225
9226                 default:
9227                         BNX2X_ERR("NVRAM config error. "
9228                                   "BAD SerDes ext_phy_config 0x%x\n",
9229                                   bp->link_params.ext_phy_config);
9230                         return;
9231                 }
9232
9233                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9234                                            port*0x10);
9235                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9236                 break;
9237
9238         case SWITCH_CFG_10G:
9239                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9240
9241                 ext_phy_type =
9242                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9243                 switch (ext_phy_type) {
9244                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9245                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9246                                        ext_phy_type);
9247
9248                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9249                                                SUPPORTED_10baseT_Full |
9250                                                SUPPORTED_100baseT_Half |
9251                                                SUPPORTED_100baseT_Full |
9252                                                SUPPORTED_1000baseT_Full |
9253                                                SUPPORTED_2500baseX_Full |
9254                                                SUPPORTED_10000baseT_Full |
9255                                                SUPPORTED_TP |
9256                                                SUPPORTED_FIBRE |
9257                                                SUPPORTED_Autoneg |
9258                                                SUPPORTED_Pause |
9259                                                SUPPORTED_Asym_Pause);
9260                         break;
9261
9262                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9263                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9264                                        ext_phy_type);
9265
9266                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9267                                                SUPPORTED_1000baseT_Full |
9268                                                SUPPORTED_FIBRE |
9269                                                SUPPORTED_Autoneg |
9270                                                SUPPORTED_Pause |
9271                                                SUPPORTED_Asym_Pause);
9272                         break;
9273
9274                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9275                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9276                                        ext_phy_type);
9277
9278                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9279                                                SUPPORTED_2500baseX_Full |
9280                                                SUPPORTED_1000baseT_Full |
9281                                                SUPPORTED_FIBRE |
9282                                                SUPPORTED_Autoneg |
9283                                                SUPPORTED_Pause |
9284                                                SUPPORTED_Asym_Pause);
9285                         break;
9286
9287                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9288                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9289                                        ext_phy_type);
9290
9291                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9292                                                SUPPORTED_FIBRE |
9293                                                SUPPORTED_Pause |
9294                                                SUPPORTED_Asym_Pause);
9295                         break;
9296
9297                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9298                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9299                                        ext_phy_type);
9300
9301                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9302                                                SUPPORTED_1000baseT_Full |
9303                                                SUPPORTED_FIBRE |
9304                                                SUPPORTED_Pause |
9305                                                SUPPORTED_Asym_Pause);
9306                         break;
9307
9308                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9309                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9310                                        ext_phy_type);
9311
9312                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9313                                                SUPPORTED_1000baseT_Full |
9314                                                SUPPORTED_Autoneg |
9315                                                SUPPORTED_FIBRE |
9316                                                SUPPORTED_Pause |
9317                                                SUPPORTED_Asym_Pause);
9318                         break;
9319
9320                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9321                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9322                                        ext_phy_type);
9323
9324                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9325                                                SUPPORTED_1000baseT_Full |
9326                                                SUPPORTED_Autoneg |
9327                                                SUPPORTED_FIBRE |
9328                                                SUPPORTED_Pause |
9329                                                SUPPORTED_Asym_Pause);
9330                         break;
9331
9332                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9333                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9334                                        ext_phy_type);
9335
9336                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9337                                                SUPPORTED_TP |
9338                                                SUPPORTED_Autoneg |
9339                                                SUPPORTED_Pause |
9340                                                SUPPORTED_Asym_Pause);
9341                         break;
9342
9343                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9344                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9345                                        ext_phy_type);
9346
9347                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9348                                                SUPPORTED_10baseT_Full |
9349                                                SUPPORTED_100baseT_Half |
9350                                                SUPPORTED_100baseT_Full |
9351                                                SUPPORTED_1000baseT_Full |
9352                                                SUPPORTED_10000baseT_Full |
9353                                                SUPPORTED_TP |
9354                                                SUPPORTED_Autoneg |
9355                                                SUPPORTED_Pause |
9356                                                SUPPORTED_Asym_Pause);
9357                         break;
9358
9359                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9360                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9361                                   bp->link_params.ext_phy_config);
9362                         break;
9363
9364                 default:
9365                         BNX2X_ERR("NVRAM config error. "
9366                                   "BAD XGXS ext_phy_config 0x%x\n",
9367                                   bp->link_params.ext_phy_config);
9368                         return;
9369                 }
9370
9371                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9372                                            port*0x18);
9373                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9374
9375                 break;
9376
9377         default:
9378                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9379                           bp->port.link_config);
9380                 return;
9381         }
9382         bp->link_params.phy_addr = bp->port.phy_addr;
9383
9384         /* mask what we support according to speed_cap_mask */
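             /* Illustrative example: a board strapped without 10G sets no
              * PORT_HW_CFG_SPEED_CAPABILITY_D0_10G bit, so
              * SUPPORTED_10000baseT_Full is cleared below even if the
              * external PHY itself could do 10G.
              */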
9385         if (!(bp->link_params.speed_cap_mask &
9386                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9387                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9388
9389         if (!(bp->link_params.speed_cap_mask &
9390                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9391                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9392
9393         if (!(bp->link_params.speed_cap_mask &
9394                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9395                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9396
9397         if (!(bp->link_params.speed_cap_mask &
9398                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9399                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9400
9401         if (!(bp->link_params.speed_cap_mask &
9402                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9403                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9404                                         SUPPORTED_1000baseT_Full);
9405
9406         if (!(bp->link_params.speed_cap_mask &
9407                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9408                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9409
9410         if (!(bp->link_params.speed_cap_mask &
9411                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9412                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9413
9414         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9415 }
9416
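     /* Translate the NVRAM link_config into req_line_speed/req_duplex and
      * the ethtool advertising mask, validating each fixed-speed choice
      * against bp->port.supported (derived from speed_cap_mask above).
      */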
9417 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9418 {
9419         bp->link_params.req_duplex = DUPLEX_FULL;
9420
9421         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9422         case PORT_FEATURE_LINK_SPEED_AUTO:
9423                 if (bp->port.supported & SUPPORTED_Autoneg) {
9424                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9425                         bp->port.advertising = bp->port.supported;
9426                 } else {
9427                         u32 ext_phy_type =
9428                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9429
9430                         if ((ext_phy_type ==
9431                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9432                             (ext_phy_type ==
9433                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9434                                 /* force 10G, no AN */
9435                                 bp->link_params.req_line_speed = SPEED_10000;
9436                                 bp->port.advertising =
9437                                                 (ADVERTISED_10000baseT_Full |
9438                                                  ADVERTISED_FIBRE);
9439                                 break;
9440                         }
9441                         BNX2X_ERR("NVRAM config error. "
9442                                   "Invalid link_config 0x%x"
9443                                   "  Autoneg not supported\n",
9444                                   bp->port.link_config);
9445                         return;
9446                 }
9447                 break;
9448
9449         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9450                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9451                         bp->link_params.req_line_speed = SPEED_10;
9452                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9453                                                 ADVERTISED_TP);
9454                 } else {
9455                         BNX2X_ERR("NVRAM config error. "
9456                                   "Invalid link_config 0x%x"
9457                                   "  speed_cap_mask 0x%x\n",
9458                                   bp->port.link_config,
9459                                   bp->link_params.speed_cap_mask);
9460                         return;
9461                 }
9462                 break;
9463
9464         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9465                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9466                         bp->link_params.req_line_speed = SPEED_10;
9467                         bp->link_params.req_duplex = DUPLEX_HALF;
9468                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9469                                                 ADVERTISED_TP);
9470                 } else {
9471                         BNX2X_ERR("NVRAM config error. "
9472                                   "Invalid link_config 0x%x"
9473                                   "  speed_cap_mask 0x%x\n",
9474                                   bp->port.link_config,
9475                                   bp->link_params.speed_cap_mask);
9476                         return;
9477                 }
9478                 break;
9479
9480         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9481                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9482                         bp->link_params.req_line_speed = SPEED_100;
9483                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9484                                                 ADVERTISED_TP);
9485                 } else {
9486                         BNX2X_ERR("NVRAM config error. "
9487                                   "Invalid link_config 0x%x"
9488                                   "  speed_cap_mask 0x%x\n",
9489                                   bp->port.link_config,
9490                                   bp->link_params.speed_cap_mask);
9491                         return;
9492                 }
9493                 break;
9494
9495         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9496                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9497                         bp->link_params.req_line_speed = SPEED_100;
9498                         bp->link_params.req_duplex = DUPLEX_HALF;
9499                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9500                                                 ADVERTISED_TP);
9501                 } else {
9502                         BNX2X_ERR("NVRAM config error. "
9503                                   "Invalid link_config 0x%x"
9504                                   "  speed_cap_mask 0x%x\n",
9505                                   bp->port.link_config,
9506                                   bp->link_params.speed_cap_mask);
9507                         return;
9508                 }
9509                 break;
9510
9511         case PORT_FEATURE_LINK_SPEED_1G:
9512                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9513                         bp->link_params.req_line_speed = SPEED_1000;
9514                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9515                                                 ADVERTISED_TP);
9516                 } else {
9517                         BNX2X_ERR("NVRAM config error. "
9518                                   "Invalid link_config 0x%x"
9519                                   "  speed_cap_mask 0x%x\n",
9520                                   bp->port.link_config,
9521                                   bp->link_params.speed_cap_mask);
9522                         return;
9523                 }
9524                 break;
9525
9526         case PORT_FEATURE_LINK_SPEED_2_5G:
9527                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9528                         bp->link_params.req_line_speed = SPEED_2500;
9529                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9530                                                 ADVERTISED_TP);
9531                 } else {
9532                         BNX2X_ERR("NVRAM config error. "
9533                                   "Invalid link_config 0x%x"
9534                                   "  speed_cap_mask 0x%x\n",
9535                                   bp->port.link_config,
9536                                   bp->link_params.speed_cap_mask);
9537                         return;
9538                 }
9539                 break;
9540
9541         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9542         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9543         case PORT_FEATURE_LINK_SPEED_10G_KR:
9544                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9545                         bp->link_params.req_line_speed = SPEED_10000;
9546                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9547                                                 ADVERTISED_FIBRE);
9548                 } else {
9549                         BNX2X_ERR("NVRAM config error. "
9550                                   "Invalid link_config 0x%x"
9551                                   "  speed_cap_mask 0x%x\n",
9552                                   bp->port.link_config,
9553                                   bp->link_params.speed_cap_mask);
9554                         return;
9555                 }
9556                 break;
9557
9558         default:
9559                 BNX2X_ERR("NVRAM config error. "
9560                           "BAD link speed link_config 0x%x\n",
9561                           bp->port.link_config);
9562                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9563                 bp->port.advertising = bp->port.supported;
9564                 break;
9565         }
9566
9567         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9568                                          PORT_FEATURE_FLOW_CONTROL_MASK);
9569         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9570             !(bp->port.supported & SUPPORTED_Autoneg))
9571                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9572
9573         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9574                        "  advertising 0x%x\n",
9575                        bp->link_params.req_line_speed,
9576                        bp->link_params.req_duplex,
9577                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9578 }
9579
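/*
 * Compose a MAC address from the two shmem words: the upper 16 bits
 * come first, then the lower 32 bits, both in big-endian (network)
 * byte order.  Illustrative example: mac_hi = 0x0010 and
 * mac_lo = 0x18000001 yield the buffer 00:10:18:00:00:01.
 */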
9580 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9581 {
9582         mac_hi = cpu_to_be16(mac_hi);
9583         mac_lo = cpu_to_be32(mac_lo);
9584         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9585         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9586 }
9587
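/*
 * Read the per-port hardware configuration from shmem: lane and
 * external-PHY config, speed capability mask, link_config, the
 * per-lane XGXS RX/TX values, the WoL default, the MDIO address to
 * use (the internal PHY when directly connected, the external PHY
 * otherwise) and the port MAC address(es).
 */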
9588 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9589 {
9590         int port = BP_PORT(bp);
9591         u32 val, val2;
9592         u32 config;
9593         u16 i;
9594         u32 ext_phy_type;
9595
9596         bp->link_params.bp = bp;
9597         bp->link_params.port = port;
9598
9599         bp->link_params.lane_config =
9600                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9601         bp->link_params.ext_phy_config =
9602                 SHMEM_RD(bp,
9603                          dev_info.port_hw_config[port].external_phy_config);
9604         /* BCM8727_NOC => BCM8727 with no over-current detection */
9605         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9606             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9607                 bp->link_params.ext_phy_config &=
9608                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9609                 bp->link_params.ext_phy_config |=
9610                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9611                 bp->link_params.feature_config_flags |=
9612                         FEATURE_CONFIG_BCM8727_NOC;
9613         }
9614
9615         bp->link_params.speed_cap_mask =
9616                 SHMEM_RD(bp,
9617                          dev_info.port_hw_config[port].speed_capability_mask);
9618
9619         bp->port.link_config =
9620                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9621
9622         /* Get the XGXS RX and TX config for all 4 lanes (two 16-bit lane values packed per shmem dword) */
9623         for (i = 0; i < 2; i++) {
9624                 val = SHMEM_RD(bp,
9625                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9626                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9627                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9628
9629                 val = SHMEM_RD(bp,
9630                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9631                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9632                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9633         }
9634
9635         /* If the device is capable of WoL, set the default state according
9636          * to the HW
9637          */
9638         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9639         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9640                    (config & PORT_FEATURE_WOL_ENABLED));
9641
9642         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9643                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9644                        bp->link_params.lane_config,
9645                        bp->link_params.ext_phy_config,
9646                        bp->link_params.speed_cap_mask, bp->port.link_config);
9647
9648         bp->link_params.switch_cfg |= (bp->port.link_config &
9649                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9650         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9651
9652         bnx2x_link_settings_requested(bp);
9653
9654         /*
9655          * If connected directly, work with the internal PHY, otherwise, work
9656          * with the external PHY
9657          */
9658         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9659         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9660                 bp->mdio.prtad = bp->link_params.phy_addr;
9661
9662         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9663                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9664                 bp->mdio.prtad =
9665                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9666
9667         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9668         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9669         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9670         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9671         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9672
9673 #ifdef BCM_CNIC
9674         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9675         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9676         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9677 #endif
9678 }
9679
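/*
 * Gather function-level HW info.  On E1H, multi-function (E1HMF) mode
 * is detected from the outer-VLAN (e1hov) tag of function 0; each MF
 * function must then carry a valid e1hov tag of its own, and may
 * override the port MAC address with a per-function one from mf_cfg.
 */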
9680 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9681 {
9682         int func = BP_FUNC(bp);
9683         u32 val, val2;
9684         int rc = 0;
9685
9686         bnx2x_get_common_hwinfo(bp);
9687
9688         bp->e1hov = 0;
9689         bp->e1hmf = 0;
9690         if (CHIP_IS_E1H(bp)) {
9691                 bp->mf_config =
9692                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9693
9694                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9695                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9696                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9697                         bp->e1hmf = 1;
9698                 BNX2X_DEV_INFO("%s function mode\n",
9699                                IS_E1HMF(bp) ? "multi" : "single");
9700
9701                 if (IS_E1HMF(bp)) {
9702                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9703                                                                 e1hov_tag) &
9704                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9705                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9706                                 bp->e1hov = val;
9707                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9708                                                "(0x%04x)\n",
9709                                                func, bp->e1hov, bp->e1hov);
9710                         } else {
9711                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
9712                                           "  aborting\n", func);
9713                                 rc = -EPERM;
9714                         }
9715                 } else {
9716                         if (BP_E1HVN(bp)) {
9717                                 BNX2X_ERR("!!!  VN %d in single function mode,"
9718                                           "  aborting\n", BP_E1HVN(bp));
9719                                 rc = -EPERM;
9720                         }
9721                 }
9722         }
9723
9724         if (!BP_NOMCP(bp)) {
9725                 bnx2x_get_port_hwinfo(bp);
9726
9727                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9728                               DRV_MSG_SEQ_NUMBER_MASK);
9729                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9730         }
9731
9732         if (IS_E1HMF(bp)) {
9733                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9734                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9735                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9736                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9737                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9738                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9739                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9740                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9741                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9742                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9743                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9744                                ETH_ALEN);
9745                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9746                                ETH_ALEN);
9747                 }
9748
9749                 return rc;
9750         }
9751
9752         if (BP_NOMCP(bp)) {
9753                 /* only supposed to happen on emulation/FPGA */
9754                 BNX2X_ERR("warning: random MAC workaround active\n");
9755                 random_ether_addr(bp->dev->dev_addr);
9756                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9757         }
9758
9759         return rc;
9760 }
9761
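/*
 * Pull the firmware revision out of the PCI VPD: locate the VPD-R
 * (read-only) block and, if the manufacturer ID keyword matches the
 * Dell vendor ID, copy the V0 vendor-specific entry into bp->fw_ver
 * (a trailing space is left so the bootcode version can be appended
 * in bnx2x_get_drvinfo).
 */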
9762 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9763 {
9764         int cnt, i, block_end, rodi;
9765         char vpd_data[BNX2X_VPD_LEN+1];
9766         char str_id_reg[VENDOR_ID_LEN+1];
9767         char str_id_cap[VENDOR_ID_LEN+1];
9768         u8 len;
9769
9770         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9771         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9772
9773         if (cnt < BNX2X_VPD_LEN)
9774                 goto out_not_found;
9775
9776         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9777                              PCI_VPD_LRDT_RO_DATA);
9778         if (i < 0)
9779                 goto out_not_found;
9780
9782         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9783                     pci_vpd_lrdt_size(&vpd_data[i]);
9784
9785         i += PCI_VPD_LRDT_TAG_SIZE;
9786
9787         if (block_end > BNX2X_VPD_LEN)
9788                 goto out_not_found;
9789
9790         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9791                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9792         if (rodi < 0)
9793                 goto out_not_found;
9794
9795         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9796
9797         if (len != VENDOR_ID_LEN)
9798                 goto out_not_found;
9799
9800         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9801
9802         /* vendor specific info */
9803         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9804         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9805         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9806             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9807
9808                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9809                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9810                 if (rodi >= 0) {
9811                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9812
9813                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9814
9815                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9816                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9817                                 bp->fw_ver[len] = ' '; /* separator for the "bc" version appended in drvinfo */
9818                         }
9819                 }
9820                 return;
9821         }
9822 out_not_found:
9823         return;
9824 }
9825
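/*
 * One-time driver-private initialization: interrupts stay masked via
 * intr_sem until HW init completes, HW/FW info is read, multi-queue
 * and TPA/LRO defaults are chosen, the coalescing ticks are rounded
 * to the HW granularity and the periodic timer is armed (with a
 * slower interval on emulation/FPGA).
 */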
9826 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9827 {
9828         int func = BP_FUNC(bp);
9829         int timer_interval;
9830         int rc;
9831
9832         /* Disable interrupt handling until HW is initialized */
9833         atomic_set(&bp->intr_sem, 1);
9834         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9835
9836         mutex_init(&bp->port.phy_mutex);
9837         mutex_init(&bp->fw_mb_mutex);
9838 #ifdef BCM_CNIC
9839         mutex_init(&bp->cnic_mutex);
9840 #endif
9841
9842         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9843         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9844
9845         rc = bnx2x_get_hwinfo(bp);
9846
9847         bnx2x_read_fwinfo(bp);
9848         /* need to reset chip if undi was active */
9849         if (!BP_NOMCP(bp))
9850                 bnx2x_undi_unload(bp);
9851
9852         if (CHIP_REV_IS_FPGA(bp))
9853                 pr_err("FPGA detected\n");
9854
9855         if (BP_NOMCP(bp) && (func == 0))
9856                 pr_err("MCP disabled, must load devices in order!\n");
9857
9858         /* Set multi queue mode */
9859         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9860             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9861                 pr_err("Multi queue disabled: requested int_mode is not MSI-X\n");
9862                 multi_mode = ETH_RSS_MODE_DISABLED;
9863         }
9864         bp->multi_mode = multi_mode;
9865
9867         bp->dev->features |= NETIF_F_GRO;
9868
9869         /* Set TPA flags */
9870         if (disable_tpa) {
9871                 bp->flags &= ~TPA_ENABLE_FLAG;
9872                 bp->dev->features &= ~NETIF_F_LRO;
9873         } else {
9874                 bp->flags |= TPA_ENABLE_FLAG;
9875                 bp->dev->features |= NETIF_F_LRO;
9876         }
9877
9878         if (CHIP_IS_E1(bp))
9879                 bp->dropless_fc = 0;
9880         else
9881                 bp->dropless_fc = dropless_fc;
9882
9883         bp->mrrs = mrrs;
9884
9885         bp->tx_ring_size = MAX_TX_AVAIL;
9886         bp->rx_ring_size = MAX_RX_AVAIL;
9887
9888         bp->rx_csum = 1;
9889
9890         /* round the tick values down to a multiple of 4*BNX2X_BTR */
9891         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9892         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
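        /* e.g. if 4*BNX2X_BTR were 16, 50us would round down to 48us
         * and 25us to 16us (illustrative values) */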
9893
9894         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9895         bp->current_interval = (poll ? poll : timer_interval);
9896
9897         init_timer(&bp->timer);
9898         bp->timer.expires = jiffies + bp->current_interval;
9899         bp->timer.data = (unsigned long) bp;
9900         bp->timer.function = bnx2x_timer;
9901
9902         return rc;
9903 }
9904
9905 /*
9906  * ethtool service functions
9907  */
9908
9909 /* All ethtool functions called with rtnl_lock */
9910
9911 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9912 {
9913         struct bnx2x *bp = netdev_priv(dev);
9914
9915         cmd->supported = bp->port.supported;
9916         cmd->advertising = bp->port.advertising;
9917
9918         if ((bp->state == BNX2X_STATE_OPEN) &&
9919             !(bp->flags & MF_FUNC_DIS) &&
9920             (bp->link_vars.link_up)) {
9921                 cmd->speed = bp->link_vars.line_speed;
9922                 cmd->duplex = bp->link_vars.duplex;
9923                 if (IS_E1HMF(bp)) {
9924                         u16 vn_max_rate;
9925
9926                         vn_max_rate =
9927                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9928                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
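                        /* MAX_BW is in units of 100 Mbps: e.g. a field
                         * value of 25 caps the reported speed at
                         * 2500 Mbps (illustrative) */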
9929                         if (vn_max_rate < cmd->speed)
9930                                 cmd->speed = vn_max_rate;
9931                 }
9932         } else {
9933                 cmd->speed = -1;
9934                 cmd->duplex = -1;
9935         }
9936
9937         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9938                 u32 ext_phy_type =
9939                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9940
9941                 switch (ext_phy_type) {
9942                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9943                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9944                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9945                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9946                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9947                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9948                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9949                         cmd->port = PORT_FIBRE;
9950                         break;
9951
9952                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9953                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9954                         cmd->port = PORT_TP;
9955                         break;
9956
9957                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9958                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9959                                   bp->link_params.ext_phy_config);
9960                         break;
9961
9962                 default:
9963                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9964                            bp->link_params.ext_phy_config);
9965                         break;
9966                 }
9967         } else
9968                 cmd->port = PORT_TP;
9969
9970         cmd->phy_address = bp->mdio.prtad;
9971         cmd->transceiver = XCVR_INTERNAL;
9972
9973         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9974                 cmd->autoneg = AUTONEG_ENABLE;
9975         else
9976                 cmd->autoneg = AUTONEG_DISABLE;
9977
9978         cmd->maxtxpkt = 0;
9979         cmd->maxrxpkt = 0;
9980
9981         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9982            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9983            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9984            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9985            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9986            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9987            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9988
9989         return 0;
9990 }
9991
9992 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9993 {
9994         struct bnx2x *bp = netdev_priv(dev);
9995         u32 advertising;
9996
9997         if (IS_E1HMF(bp))
9998                 return 0;
9999
10000         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10001            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10002            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10003            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10004            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10005            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10006            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10007
10008         if (cmd->autoneg == AUTONEG_ENABLE) {
10009                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10010                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10011                         return -EINVAL;
10012                 }
10013
10014                 /* advertise the requested speed and duplex if supported */
10015                 cmd->advertising &= bp->port.supported;
10016
10017                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10018                 bp->link_params.req_duplex = DUPLEX_FULL;
10019                 bp->port.advertising |= (ADVERTISED_Autoneg |
10020                                          cmd->advertising);
10021
10022         } else { /* forced speed */
10023                 /* advertise the requested speed and duplex if supported */
10024                 switch (cmd->speed) {
10025                 case SPEED_10:
10026                         if (cmd->duplex == DUPLEX_FULL) {
10027                                 if (!(bp->port.supported &
10028                                       SUPPORTED_10baseT_Full)) {
10029                                         DP(NETIF_MSG_LINK,
10030                                            "10M full not supported\n");
10031                                         return -EINVAL;
10032                                 }
10033
10034                                 advertising = (ADVERTISED_10baseT_Full |
10035                                                ADVERTISED_TP);
10036                         } else {
10037                                 if (!(bp->port.supported &
10038                                       SUPPORTED_10baseT_Half)) {
10039                                         DP(NETIF_MSG_LINK,
10040                                            "10M half not supported\n");
10041                                         return -EINVAL;
10042                                 }
10043
10044                                 advertising = (ADVERTISED_10baseT_Half |
10045                                                ADVERTISED_TP);
10046                         }
10047                         break;
10048
10049                 case SPEED_100:
10050                         if (cmd->duplex == DUPLEX_FULL) {
10051                                 if (!(bp->port.supported &
10052                                                 SUPPORTED_100baseT_Full)) {
10053                                         DP(NETIF_MSG_LINK,
10054                                            "100M full not supported\n");
10055                                         return -EINVAL;
10056                                 }
10057
10058                                 advertising = (ADVERTISED_100baseT_Full |
10059                                                ADVERTISED_TP);
10060                         } else {
10061                                 if (!(bp->port.supported &
10062                                                 SUPPORTED_100baseT_Half)) {
10063                                         DP(NETIF_MSG_LINK,
10064                                            "100M half not supported\n");
10065                                         return -EINVAL;
10066                                 }
10067
10068                                 advertising = (ADVERTISED_100baseT_Half |
10069                                                ADVERTISED_TP);
10070                         }
10071                         break;
10072
10073                 case SPEED_1000:
10074                         if (cmd->duplex != DUPLEX_FULL) {
10075                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10076                                 return -EINVAL;
10077                         }
10078
10079                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10080                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10081                                 return -EINVAL;
10082                         }
10083
10084                         advertising = (ADVERTISED_1000baseT_Full |
10085                                        ADVERTISED_TP);
10086                         break;
10087
10088                 case SPEED_2500:
10089                         if (cmd->duplex != DUPLEX_FULL) {
10090                                 DP(NETIF_MSG_LINK,
10091                                    "2.5G half not supported\n");
10092                                 return -EINVAL;
10093                         }
10094
10095                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10096                                 DP(NETIF_MSG_LINK,
10097                                    "2.5G full not supported\n");
10098                                 return -EINVAL;
10099                         }
10100
10101                         advertising = (ADVERTISED_2500baseX_Full |
10102                                        ADVERTISED_TP);
10103                         break;
10104
10105                 case SPEED_10000:
10106                         if (cmd->duplex != DUPLEX_FULL) {
10107                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10108                                 return -EINVAL;
10109                         }
10110
10111                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10112                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10113                                 return -EINVAL;
10114                         }
10115
10116                         advertising = (ADVERTISED_10000baseT_Full |
10117                                        ADVERTISED_FIBRE);
10118                         break;
10119
10120                 default:
10121                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10122                         return -EINVAL;
10123                 }
10124
10125                 bp->link_params.req_line_speed = cmd->speed;
10126                 bp->link_params.req_duplex = cmd->duplex;
10127                 bp->port.advertising = advertising;
10128         }
10129
10130         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10131            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10132            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10133            bp->port.advertising);
10134
10135         if (netif_running(dev)) {
10136                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10137                 bnx2x_link_set(bp);
10138         }
10139
10140         return 0;
10141 }
10142
10143 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10144 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10145
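/*
 * Size the register dump: count every register block (and wide-register
 * read) that is online for this chip revision, in dwords, then convert
 * to bytes and add the dump header.
 */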
10146 static int bnx2x_get_regs_len(struct net_device *dev)
10147 {
10148         struct bnx2x *bp = netdev_priv(dev);
10149         int regdump_len = 0;
10150         int i;
10151
10152         if (CHIP_IS_E1(bp)) {
10153                 for (i = 0; i < REGS_COUNT; i++)
10154                         if (IS_E1_ONLINE(reg_addrs[i].info))
10155                                 regdump_len += reg_addrs[i].size;
10156
10157                 for (i = 0; i < WREGS_COUNT_E1; i++)
10158                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10159                                 regdump_len += wreg_addrs_e1[i].size *
10160                                         (1 + wreg_addrs_e1[i].read_regs_count);
10161
10162         } else { /* E1H */
10163                 for (i = 0; i < REGS_COUNT; i++)
10164                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10165                                 regdump_len += reg_addrs[i].size;
10166
10167                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10168                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10169                                 regdump_len += wreg_addrs_e1h[i].size *
10170                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10171         }
10172         regdump_len *= 4;
10173         regdump_len += sizeof(struct dump_hdr);
10174
10175         return regdump_len;
10176 }
10177
10178 static void bnx2x_get_regs(struct net_device *dev,
10179                            struct ethtool_regs *regs, void *_p)
10180 {
10181         u32 *p = _p, i, j;
10182         struct bnx2x *bp = netdev_priv(dev);
10183         struct dump_hdr dump_hdr = {0};
10184
10185         regs->version = 0;
10186         memset(p, 0, regs->len);
10187
10188         if (!netif_running(bp->dev))
10189                 return;
10190
10191         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10192         dump_hdr.dump_sign = dump_sign_all;
10193         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10194         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10195         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10196         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10197         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10198
10199         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10200         p += dump_hdr.hdr_size + 1; /* skip the header: hdr_size is in dwords, minus one */
10201
10202         if (CHIP_IS_E1(bp)) {
10203                 for (i = 0; i < REGS_COUNT; i++)
10204                         if (IS_E1_ONLINE(reg_addrs[i].info))
10205                                 for (j = 0; j < reg_addrs[i].size; j++)
10206                                         *p++ = REG_RD(bp,
10207                                                       reg_addrs[i].addr + j*4);
10208
10209         } else { /* E1H */
10210                 for (i = 0; i < REGS_COUNT; i++)
10211                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10212                                 for (j = 0; j < reg_addrs[i].size; j++)
10213                                         *p++ = REG_RD(bp,
10214                                                       reg_addrs[i].addr + j*4);
10215         }
10216 }
10217
10218 #define PHY_FW_VER_LEN                  10
10219
10220 static void bnx2x_get_drvinfo(struct net_device *dev,
10221                               struct ethtool_drvinfo *info)
10222 {
10223         struct bnx2x *bp = netdev_priv(dev);
10224         u8 phy_fw_ver[PHY_FW_VER_LEN];
10225
10226         strcpy(info->driver, DRV_MODULE_NAME);
10227         strcpy(info->version, DRV_MODULE_VERSION);
10228
10229         phy_fw_ver[0] = '\0';
10230         if (bp->port.pmf) {
10231                 bnx2x_acquire_phy_lock(bp);
10232                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10233                                              (bp->state != BNX2X_STATE_CLOSED),
10234                                              phy_fw_ver, PHY_FW_VER_LEN);
10235                 bnx2x_release_phy_lock(bp);
10236         }
10237
10238         strncpy(info->fw_version, bp->fw_ver, 32);
10239         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10240                  "bc %d.%d.%d%s%s",
10241                  (bp->common.bc_ver & 0xff0000) >> 16,
10242                  (bp->common.bc_ver & 0xff00) >> 8,
10243                  (bp->common.bc_ver & 0xff),
10244                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10245         strcpy(info->bus_info, pci_name(bp->pdev));
10246         info->n_stats = BNX2X_NUM_STATS;
10247         info->testinfo_len = BNX2X_NUM_TESTS;
10248         info->eedump_len = bp->common.flash_size;
10249         info->regdump_len = bnx2x_get_regs_len(dev);
10250 }
10251
10252 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10253 {
10254         struct bnx2x *bp = netdev_priv(dev);
10255
10256         if (bp->flags & NO_WOL_FLAG) {
10257                 wol->supported = 0;
10258                 wol->wolopts = 0;
10259         } else {
10260                 wol->supported = WAKE_MAGIC;
10261                 if (bp->wol)
10262                         wol->wolopts = WAKE_MAGIC;
10263                 else
10264                         wol->wolopts = 0;
10265         }
10266         memset(&wol->sopass, 0, sizeof(wol->sopass));
10267 }
10268
10269 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10270 {
10271         struct bnx2x *bp = netdev_priv(dev);
10272
10273         if (wol->wolopts & ~WAKE_MAGIC)
10274                 return -EINVAL;
10275
10276         if (wol->wolopts & WAKE_MAGIC) {
10277                 if (bp->flags & NO_WOL_FLAG)
10278                         return -EINVAL;
10279
10280                 bp->wol = 1;
10281         } else
10282                 bp->wol = 0;
10283
10284         return 0;
10285 }
10286
10287 static u32 bnx2x_get_msglevel(struct net_device *dev)
10288 {
10289         struct bnx2x *bp = netdev_priv(dev);
10290
10291         return bp->msg_enable;
10292 }
10293
10294 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10295 {
10296         struct bnx2x *bp = netdev_priv(dev);
10297
10298         if (capable(CAP_NET_ADMIN))
10299                 bp->msg_enable = level;
10300 }
10301
10302 static int bnx2x_nway_reset(struct net_device *dev)
10303 {
10304         struct bnx2x *bp = netdev_priv(dev);
10305
10306         if (!bp->port.pmf)
10307                 return 0;
10308
10309         if (netif_running(dev)) {
10310                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10311                 bnx2x_link_set(bp);
10312         }
10313
10314         return 0;
10315 }
10316
10317 static u32 bnx2x_get_link(struct net_device *dev)
10318 {
10319         struct bnx2x *bp = netdev_priv(dev);
10320
10321         if (bp->flags & MF_FUNC_DIS)
10322                 return 0;
10323
10324         return bp->link_vars.link_up;
10325 }
10326
10327 static int bnx2x_get_eeprom_len(struct net_device *dev)
10328 {
10329         struct bnx2x *bp = netdev_priv(dev);
10330
10331         return bp->common.flash_size;
10332 }
10333
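/*
 * NVRAM access protocol: grab the per-port SW arbitration bit in
 * MCP_REG_MCPR_NVM_SW_ARB, enable the access-enable bits, issue
 * dword-sized read/write commands framed by the FIRST/LAST command
 * flags, then disable access and release the arbitration.  Timeouts
 * are stretched 100x on slow (emulation/FPGA) chip revisions.
 */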
10334 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10335 {
10336         int port = BP_PORT(bp);
10337         int count, i;
10338         u32 val = 0;
10339
10340         /* adjust timeout for emulation/FPGA */
10341         count = NVRAM_TIMEOUT_COUNT;
10342         if (CHIP_REV_IS_SLOW(bp))
10343                 count *= 100;
10344
10345         /* request access to nvram interface */
10346         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10347                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10348
10349         for (i = 0; i < count*10; i++) {
10350                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10351                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10352                         break;
10353
10354                 udelay(5);
10355         }
10356
10357         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10358                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10359                 return -EBUSY;
10360         }
10361
10362         return 0;
10363 }
10364
10365 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10366 {
10367         int port = BP_PORT(bp);
10368         int count, i;
10369         u32 val = 0;
10370
10371         /* adjust timeout for emulation/FPGA */
10372         count = NVRAM_TIMEOUT_COUNT;
10373         if (CHIP_REV_IS_SLOW(bp))
10374                 count *= 100;
10375
10376         /* relinquish nvram interface */
10377         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10378                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10379
10380         for (i = 0; i < count*10; i++) {
10381                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10382                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10383                         break;
10384
10385                 udelay(5);
10386         }
10387
10388         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10389                 DP(BNX2X_MSG_NVM, "cannot release access to nvram interface\n");
10390                 return -EBUSY;
10391         }
10392
10393         return 0;
10394 }
10395
10396 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10397 {
10398         u32 val;
10399
10400         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10401
10402         /* enable both bits, even on read */
10403         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10404                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10405                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10406 }
10407
10408 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10409 {
10410         u32 val;
10411
10412         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10413
10414         /* disable both bits, even after read */
10415         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10416                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10417                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10418 }
10419
10420 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10421                                   u32 cmd_flags)
10422 {
10423         int count, i, rc;
10424         u32 val;
10425
10426         /* build the command word */
10427         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10428
10429         /* need to clear DONE bit separately */
10430         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10431
10432         /* address of the NVRAM to read from */
10433         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10434                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10435
10436         /* issue a read command */
10437         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10438
10439         /* adjust timeout for emulation/FPGA */
10440         count = NVRAM_TIMEOUT_COUNT;
10441         if (CHIP_REV_IS_SLOW(bp))
10442                 count *= 100;
10443
10444         /* wait for completion */
10445         *ret_val = 0;
10446         rc = -EBUSY;
10447         for (i = 0; i < count; i++) {
10448                 udelay(5);
10449                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10450
10451                 if (val & MCPR_NVM_COMMAND_DONE) {
10452                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10453                         /* we read nvram data in cpu order,
10454                          * but ethtool sees it as an array of bytes;
10455                          * converting to big-endian does the work */
10456                         *ret_val = cpu_to_be32(val);
10457                         rc = 0;
10458                         break;
10459                 }
10460         }
10461
10462         return rc;
10463 }
10464
10465 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10466                             int buf_size)
10467 {
10468         int rc;
10469         u32 cmd_flags;
10470         __be32 val;
10471
10472         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10473                 DP(BNX2X_MSG_NVM,
10474                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10475                    offset, buf_size);
10476                 return -EINVAL;
10477         }
10478
10479         if (offset + buf_size > bp->common.flash_size) {
10480                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10481                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10482                    offset, buf_size, bp->common.flash_size);
10483                 return -EINVAL;
10484         }
10485
10486         /* request access to nvram interface */
10487         rc = bnx2x_acquire_nvram_lock(bp);
10488         if (rc)
10489                 return rc;
10490
10491         /* enable access to nvram interface */
10492         bnx2x_enable_nvram_access(bp);
10493
10494         /* read the first word(s) */
10495         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10496         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10497                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10498                 memcpy(ret_buf, &val, 4);
10499
10500                 /* advance to the next dword */
10501                 offset += sizeof(u32);
10502                 ret_buf += sizeof(u32);
10503                 buf_size -= sizeof(u32);
10504                 cmd_flags = 0;
10505         }
10506
10507         if (rc == 0) {
10508                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10509                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10510                 memcpy(ret_buf, &val, 4);
10511         }
10512
10513         /* disable access to nvram interface */
10514         bnx2x_disable_nvram_access(bp);
10515         bnx2x_release_nvram_lock(bp);
10516
10517         return rc;
10518 }
10519
10520 static int bnx2x_get_eeprom(struct net_device *dev,
10521                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10522 {
10523         struct bnx2x *bp = netdev_priv(dev);
10524         int rc;
10525
10526         if (!netif_running(dev))
10527                 return -EAGAIN;
10528
10529         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10530            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10531            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10532            eeprom->len, eeprom->len);
10533
10534         /* parameters already validated in ethtool_get_eeprom */
10535
10536         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10537
10538         return rc;
10539 }
10540
10541 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10542                                    u32 cmd_flags)
10543 {
10544         int count, i, rc;
10545
10546         /* build the command word */
10547         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10548
10549         /* need to clear DONE bit separately */
10550         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10551
10552         /* write the data */
10553         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10554
10555         /* address of the NVRAM to write to */
10556         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10557                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10558
10559         /* issue the write command */
10560         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10561
10562         /* adjust timeout for emulation/FPGA */
10563         count = NVRAM_TIMEOUT_COUNT;
10564         if (CHIP_REV_IS_SLOW(bp))
10565                 count *= 100;
10566
10567         /* wait for completion */
10568         rc = -EBUSY;
10569         for (i = 0; i < count; i++) {
10570                 udelay(5);
10571                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10572                 if (val & MCPR_NVM_COMMAND_DONE) {
10573                         rc = 0;
10574                         break;
10575                 }
10576         }
10577
10578         return rc;
10579 }
10580
10581 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
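/* e.g. offset 0x102 lies in byte 2 of its aligned dword, so
 * BYTE_OFFSET yields a shift of 16 bits (illustrative) */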
10582
10583 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10584                               int buf_size)
10585 {
10586         int rc;
10587         u32 cmd_flags;
10588         u32 align_offset;
10589         __be32 val;
10590
10591         if (offset + buf_size > bp->common.flash_size) {
10592                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10593                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10594                    offset, buf_size, bp->common.flash_size);
10595                 return -EINVAL;
10596         }
10597
10598         /* request access to nvram interface */
10599         rc = bnx2x_acquire_nvram_lock(bp);
10600         if (rc)
10601                 return rc;
10602
10603         /* enable access to nvram interface */
10604         bnx2x_enable_nvram_access(bp);
10605
10606         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10607         align_offset = (offset & ~0x03);
10608         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10609
10610         if (rc == 0) {
10611                 val &= ~(0xff << BYTE_OFFSET(offset));
10612                 val |= (*data_buf << BYTE_OFFSET(offset));
10613
10614                 /* nvram data is returned as an array of bytes;
10615                  * convert it back to cpu order */
10616                 val = be32_to_cpu(val);
10617
10618                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10619                                              cmd_flags);
10620         }
10621
10622         /* disable access to nvram interface */
10623         bnx2x_disable_nvram_access(bp);
10624         bnx2x_release_nvram_lock(bp);
10625
10626         return rc;
10627 }
10628
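/*
 * Multi-dword NVRAM write: single-byte writes are handled by the
 * read-modify-write helper above; otherwise each dword is written
 * with the FIRST/LAST command flags re-asserted on NVRAM page
 * boundaries, presumably because a write burst may not cross a page.
 */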
10629 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10630                              int buf_size)
10631 {
10632         int rc;
10633         u32 cmd_flags;
10634         u32 val;
10635         u32 written_so_far;
10636
10637         if (buf_size == 1)      /* ethtool */
10638                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10639
10640         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10641                 DP(BNX2X_MSG_NVM,
10642                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10643                    offset, buf_size);
10644                 return -EINVAL;
10645         }
10646
10647         if (offset + buf_size > bp->common.flash_size) {
10648                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10649                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10650                    offset, buf_size, bp->common.flash_size);
10651                 return -EINVAL;
10652         }
10653
10654         /* request access to nvram interface */
10655         rc = bnx2x_acquire_nvram_lock(bp);
10656         if (rc)
10657                 return rc;
10658
10659         /* enable access to nvram interface */
10660         bnx2x_enable_nvram_access(bp);
10661
10662         written_so_far = 0;
10663         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10664         while ((written_so_far < buf_size) && (rc == 0)) {
10665                 if (written_so_far == (buf_size - sizeof(u32)))
10666                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10667                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10668                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10669                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10670                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10671
10672                 memcpy(&val, data_buf, 4);
10673
10674                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10675
10676                 /* advance to the next dword */
10677                 offset += sizeof(u32);
10678                 data_buf += sizeof(u32);
10679                 written_so_far += sizeof(u32);
10680                 cmd_flags = 0;
10681         }
10682
10683         /* disable access to nvram interface */
10684         bnx2x_disable_nvram_access(bp);
10685         bnx2x_release_nvram_lock(bp);
10686
10687         return rc;
10688 }
10689
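/*
 * Besides plain NVRAM writes, set_eeprom doubles as the PHY firmware
 * upgrade hook.  Magics in the 0x504859xx ('PHY'..) range are
 * PMF-only; the recognized commands prepare the PHY for a FW upgrade
 * ('PHYP'), re-initialize the link afterwards ('PHYR') or finish the
 * upgrade (0x53985943: SFX7101 DSP reset).  Anything else is written
 * to NVRAM as-is.
 */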
10690 static int bnx2x_set_eeprom(struct net_device *dev,
10691                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10692 {
10693         struct bnx2x *bp = netdev_priv(dev);
10694         int port = BP_PORT(bp);
10695         int rc = 0;
10696
10697         if (!netif_running(dev))
10698                 return -EAGAIN;
10699
10700         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10701            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10702            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10703            eeprom->len, eeprom->len);
10704
10705         /* parameters already validated in ethtool_set_eeprom */
10706
10707         /* PHY eeprom can be accessed only by the PMF */
10708         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10709             !bp->port.pmf)
10710                 return -EINVAL;
10711
10712         if (eeprom->magic == 0x50485950) {
10713                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10714                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10715
10716                 bnx2x_acquire_phy_lock(bp);
10717                 rc |= bnx2x_link_reset(&bp->link_params,
10718                                        &bp->link_vars, 0);
10719                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10720                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10721                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10722                                        MISC_REGISTERS_GPIO_HIGH, port);
10723                 bnx2x_release_phy_lock(bp);
10724                 bnx2x_link_report(bp);
10725
10726         } else if (eeprom->magic == 0x50485952) {
10727                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10728                 if (bp->state == BNX2X_STATE_OPEN) {
10729                         bnx2x_acquire_phy_lock(bp);
10730                         rc |= bnx2x_link_reset(&bp->link_params,
10731                                                &bp->link_vars, 1);
10732
10733                         rc |= bnx2x_phy_init(&bp->link_params,
10734                                              &bp->link_vars);
10735                         bnx2x_release_phy_lock(bp);
10736                         bnx2x_calc_fc_adv(bp);
10737                 }
10738         } else if (eeprom->magic == 0x53985943) {
10739                 /* 0x53985943: PHY FW upgrade completed (note: ASCII 'PHYC' would be 0x50485943) */
10740                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10741                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10742                         u8 ext_phy_addr =
10743                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10744
10745                         /* take the DSP out of download mode */
10746                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10747                                        MISC_REGISTERS_GPIO_LOW, port);
10748
10749                         bnx2x_acquire_phy_lock(bp);
10750
10751                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10752
10753                         /* wait 0.5 sec to allow it to run */
10754                         msleep(500);
10755                         bnx2x_ext_phy_hw_reset(bp, port);
10756                         msleep(500);
10757                         bnx2x_release_phy_lock(bp);
10758                 }
10759         } else
10760                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10761
10762         return rc;
10763 }
10764
10765 static int bnx2x_get_coalesce(struct net_device *dev,
10766                               struct ethtool_coalesce *coal)
10767 {
10768         struct bnx2x *bp = netdev_priv(dev);
10769
10770         memset(coal, 0, sizeof(struct ethtool_coalesce));
10771
10772         coal->rx_coalesce_usecs = bp->rx_ticks;
10773         coal->tx_coalesce_usecs = bp->tx_ticks;
10774
10775         return 0;
10776 }
10777
10778 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximum coalescing timeout in us (0xf0*12 = 2880) */
10779 static int bnx2x_set_coalesce(struct net_device *dev,
10780                               struct ethtool_coalesce *coal)
10781 {
10782         struct bnx2x *bp = netdev_priv(dev);
10783
10784         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
10785         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
10786                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
10787
10788         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
10789         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
10790                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
10791
10792         if (netif_running(dev))
10793                 bnx2x_update_coalesce(bp);
10794
10795         return 0;
10796 }
10797
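/* The ring-size handlers below back "ethtool -g / -G".  Changing the ring
 * sizes on a running interface bounces the NIC (unload + reload).  An
 * illustrative invocation, with placeholder values bounded by
 * MAX_RX_AVAIL/MAX_TX_AVAIL:
 *   ethtool -G eth0 rx 2048 tx 2048
 */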
10798 static void bnx2x_get_ringparam(struct net_device *dev,
10799                                 struct ethtool_ringparam *ering)
10800 {
10801         struct bnx2x *bp = netdev_priv(dev);
10802
10803         ering->rx_max_pending = MAX_RX_AVAIL;
10804         ering->rx_mini_max_pending = 0;
10805         ering->rx_jumbo_max_pending = 0;
10806
10807         ering->rx_pending = bp->rx_ring_size;
10808         ering->rx_mini_pending = 0;
10809         ering->rx_jumbo_pending = 0;
10810
10811         ering->tx_max_pending = MAX_TX_AVAIL;
10812         ering->tx_pending = bp->tx_ring_size;
10813 }
10814
10815 static int bnx2x_set_ringparam(struct net_device *dev,
10816                                struct ethtool_ringparam *ering)
10817 {
10818         struct bnx2x *bp = netdev_priv(dev);
10819         int rc = 0;
10820
10821         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10822                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10823                 return -EAGAIN;
10824         }
10825
10826         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10827             (ering->tx_pending > MAX_TX_AVAIL) ||
10828             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10829                 return -EINVAL;
10830
10831         bp->rx_ring_size = ering->rx_pending;
10832         bp->tx_ring_size = ering->tx_pending;
10833
10834         if (netif_running(dev)) {
10835                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10836                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10837         }
10838
10839         return rc;
10840 }
10841
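/* Flow-control handlers backing "ethtool -a / -A", e.g. (placeholders):
 *   ethtool -A eth0 autoneg on rx on tx on
 * Note that bnx2x_set_pauseparam() is a no-op in E1H multi-function mode.
 */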
10842 static void bnx2x_get_pauseparam(struct net_device *dev,
10843                                  struct ethtool_pauseparam *epause)
10844 {
10845         struct bnx2x *bp = netdev_priv(dev);
10846
10847         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10848                            BNX2X_FLOW_CTRL_AUTO) &&
10849                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10850
10851         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10852                             BNX2X_FLOW_CTRL_RX);
10853         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10854                             BNX2X_FLOW_CTRL_TX);
10855
10856         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10857            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10858            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10859 }
10860
10861 static int bnx2x_set_pauseparam(struct net_device *dev,
10862                                 struct ethtool_pauseparam *epause)
10863 {
10864         struct bnx2x *bp = netdev_priv(dev);
10865
10866         if (IS_E1HMF(bp))
10867                 return 0;
10868
10869         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10870            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10871            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10872
10873         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10874
10875         if (epause->rx_pause)
10876                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10877
10878         if (epause->tx_pause)
10879                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10880
10881         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10882                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10883
10884         if (epause->autoneg) {
10885                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10886                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10887                         return -EINVAL;
10888                 }
10889
10890                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10891                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10892         }
10893
10894         DP(NETIF_MSG_LINK,
10895            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10896
10897         if (netif_running(dev)) {
10898                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10899                 bnx2x_link_set(bp);
10900         }
10901
10902         return 0;
10903 }
10904
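/* LRO toggling (typically "ethtool -K eth0 lro on|off") arrives here as
 * ETH_FLAG_LRO.  Enabling it turns on HW TPA aggregation, which in turn
 * requires Rx checksum offload; any change bounces a running NIC.
 */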
10905 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10906 {
10907         struct bnx2x *bp = netdev_priv(dev);
10908         int changed = 0;
10909         int rc = 0;
10910
10911         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10912                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10913                 return -EAGAIN;
10914         }
10915
10916         /* TPA requires Rx CSUM offloading */
10917         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10918                 if (!disable_tpa) {
10919                         if (!(dev->features & NETIF_F_LRO)) {
10920                                 dev->features |= NETIF_F_LRO;
10921                                 bp->flags |= TPA_ENABLE_FLAG;
10922                                 changed = 1;
10923                         }
10924                 } else
10925                         rc = -EINVAL;
10926         } else if (dev->features & NETIF_F_LRO) {
10927                 dev->features &= ~NETIF_F_LRO;
10928                 bp->flags &= ~TPA_ENABLE_FLAG;
10929                 changed = 1;
10930         }
10931
10932         if (changed && netif_running(dev)) {
10933                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10934                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10935         }
10936
10937         return rc;
10938 }
10939
10940 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10941 {
10942         struct bnx2x *bp = netdev_priv(dev);
10943
10944         return bp->rx_csum;
10945 }
10946
10947 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10948 {
10949         struct bnx2x *bp = netdev_priv(dev);
10950         int rc = 0;
10951
10952         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10953                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10954                 return -EAGAIN;
10955         }
10956
10957         bp->rx_csum = data;
10958
10959         /* Disable TPA when Rx CSUM is disabled; otherwise all
10960            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10961         if (!data) {
10962                 u32 flags = ethtool_op_get_flags(dev);
10963
10964                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10965         }
10966
10967         return rc;
10968 }
10969
10970 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10971 {
10972         if (data) {
10973                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10974                 dev->features |= NETIF_F_TSO6;
10975         } else {
10976                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10977                 dev->features &= ~NETIF_F_TSO6;
10978         }
10979
10980         return 0;
10981 }
10982
10983 static const struct {
10984         char string[ETH_GSTRING_LEN];
10985 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10986         { "register_test (offline)" },
10987         { "memory_test (offline)" },
10988         { "loopback_test (offline)" },
10989         { "nvram_test (online)" },
10990         { "interrupt_test (online)" },
10991         { "link_test (online)" },
10992         { "idle check (online)" }
10993 };
10994
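/* The strings above are reported for ETH_SS_TEST and line up 1:1 with the
 * buf[] slots filled in by bnx2x_self_test() below; "ethtool -t eth0
 * offline" runs the full set, "ethtool -t eth0 online" only the online
 * subset.
 */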
10995 static int bnx2x_test_registers(struct bnx2x *bp)
10996 {
10997         int idx, i, rc = -ENODEV;
10998         u32 wr_val = 0;
10999         int port = BP_PORT(bp);
11000         static const struct {
11001                 u32  offset0;
11002                 u32  offset1;
11003                 u32  mask;
11004         } reg_tbl[] = {
11005 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11006                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11007                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11008                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11009                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11010                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11011                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11012                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11013                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11014                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11015 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11016                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11017                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11018                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11019                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11020                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11021                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11022                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11023                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11024                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11025 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11026                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11027                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11028                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11029                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11030                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11031                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11032                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11033                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11034                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11035 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11036                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11037                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11038                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11039                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11040                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11041                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11042
11043                 { 0xffffffff, 0, 0x00000000 }
11044         };
11045
11046         if (!netif_running(bp->dev))
11047                 return rc;
11048
11049         /* Run the test twice:
11050            first writing 0x00000000, then writing 0xffffffff */
11051         for (idx = 0; idx < 2; idx++) {
11052
11053                 switch (idx) {
11054                 case 0:
11055                         wr_val = 0;
11056                         break;
11057                 case 1:
11058                         wr_val = 0xffffffff;
11059                         break;
11060                 }
11061
11062                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11063                         u32 offset, mask, save_val, val;
11064
11065                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11066                         mask = reg_tbl[i].mask;
11067
11068                         save_val = REG_RD(bp, offset);
11069
11070                         REG_WR(bp, offset, wr_val);
11071                         val = REG_RD(bp, offset);
11072
11073                         /* Restore the original register's value */
11074                         REG_WR(bp, offset, save_val);
11075
11076                         /* verify the value is as expected */
11077                         if ((val & mask) != (wr_val & mask))
11078                                 goto test_reg_exit;
11079                 }
11080         }
11081
11082         rc = 0;
11083
11084 test_reg_exit:
11085         return rc;
11086 }
11087
11088 static int bnx2x_test_memory(struct bnx2x *bp)
11089 {
11090         int i, j, rc = -ENODEV;
11091         u32 val;
11092         static const struct {
11093                 u32 offset;
11094                 int size;
11095         } mem_tbl[] = {
11096                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11097                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11098                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11099                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11100                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11101                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11102                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11103
11104                 { 0xffffffff, 0 }
11105         };
11106         static const struct {
11107                 char *name;
11108                 u32 offset;
11109                 u32 e1_mask;
11110                 u32 e1h_mask;
11111         } prty_tbl[] = {
11112                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11113                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11114                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11115                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11116                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11117                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11118
11119                 { NULL, 0xffffffff, 0, 0 }
11120         };
11121
11122         if (!netif_running(bp->dev))
11123                 return rc;
11124
11125         /* Go through all the memories */
11126         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11127                 for (j = 0; j < mem_tbl[i].size; j++)
11128                         REG_RD(bp, mem_tbl[i].offset + j*4);
11129
11130         /* Check the parity status */
11131         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11132                 val = REG_RD(bp, prty_tbl[i].offset);
11133                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11134                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11135                         DP(NETIF_MSG_HW,
11136                            "%s is 0x%x\n", prty_tbl[i].name, val);
11137                         goto test_mem_exit;
11138                 }
11139         }
11140
11141         rc = 0;
11142
11143 test_mem_exit:
11144         return rc;
11145 }
11146
11147 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11148 {
11149         int cnt = 1000;
11150
11151         if (link_up)
11152                 while (bnx2x_link_test(bp) && cnt--)
11153                         msleep(10);
11154 }
11155
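/* Loopback test: build one self-addressed frame (dst = own MAC, src = 0,
 * payload byte i set to i & 0xff) on queue 0, ring the Tx doorbell, then
 * verify that the same bytes come back on the Rx completion queue.
 */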
11156 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11157 {
11158         unsigned int pkt_size, num_pkts, i;
11159         struct sk_buff *skb;
11160         unsigned char *packet;
11161         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11162         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11163         u16 tx_start_idx, tx_idx;
11164         u16 rx_start_idx, rx_idx;
11165         u16 pkt_prod, bd_prod;
11166         struct sw_tx_bd *tx_buf;
11167         struct eth_tx_start_bd *tx_start_bd;
11168         struct eth_tx_parse_bd *pbd = NULL;
11169         dma_addr_t mapping;
11170         union eth_rx_cqe *cqe;
11171         u8 cqe_fp_flags;
11172         struct sw_rx_bd *rx_buf;
11173         u16 len;
11174         int rc = -ENODEV;
11175
11176         /* check the loopback mode */
11177         switch (loopback_mode) {
11178         case BNX2X_PHY_LOOPBACK:
11179                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11180                         return -EINVAL;
11181                 break;
11182         case BNX2X_MAC_LOOPBACK:
11183                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11184                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11185                 break;
11186         default:
11187                 return -EINVAL;
11188         }
11189
11190         /* prepare the loopback packet */
11191         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11192                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11193         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11194         if (!skb) {
11195                 rc = -ENOMEM;
11196                 goto test_loopback_exit;
11197         }
11198         packet = skb_put(skb, pkt_size);
11199         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11200         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11201         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11202         for (i = ETH_HLEN; i < pkt_size; i++)
11203                 packet[i] = (unsigned char) (i & 0xff);
11204
11205         /* send the loopback packet */
11206         num_pkts = 0;
11207         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11208         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11209
11210         pkt_prod = fp_tx->tx_pkt_prod++;
11211         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11212         tx_buf->first_bd = fp_tx->tx_bd_prod;
11213         tx_buf->skb = skb;
11214         tx_buf->flags = 0;
11215
11216         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11217         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11218         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11219                                  skb_headlen(skb), DMA_TO_DEVICE);
11220         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11221         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11222         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11223         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11224         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11225         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11226         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11227                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11228
11229         /* turn on parsing and get a BD */
11230         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11231         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11232
11233         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11234
11235         wmb();
11236
11237         fp_tx->tx_db.data.prod += 2;
11238         barrier();
11239         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11240
11241         mmiowb();
11242
11243         num_pkts++;
11244         fp_tx->tx_bd_prod += 2; /* start + pbd */
11245
11246         udelay(100);
11247
11248         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11249         if (tx_idx != tx_start_idx + num_pkts)
11250                 goto test_loopback_exit;
11251
11252         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11253         if (rx_idx != rx_start_idx + num_pkts)
11254                 goto test_loopback_exit;
11255
11256         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11257         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11258         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11259                 goto test_loopback_rx_exit;
11260
11261         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11262         if (len != pkt_size)
11263                 goto test_loopback_rx_exit;
11264
11265         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11266         skb = rx_buf->skb;
11267         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11268         for (i = ETH_HLEN; i < pkt_size; i++)
11269                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11270                         goto test_loopback_rx_exit;
11271
11272         rc = 0;
11273
11274 test_loopback_rx_exit:
11275
11276         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11277         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11278         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11279         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11280
11281         /* Update producers */
11282         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11283                              fp_rx->rx_sge_prod);
11284
11285 test_loopback_exit:
11286         bp->link_params.loopback_mode = LOOPBACK_NONE;
11287
11288         return rc;
11289 }
11290
11291 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11292 {
11293         int rc = 0, res;
11294
11295         if (!netif_running(bp->dev))
11296                 return BNX2X_LOOPBACK_FAILED;
11297
11298         bnx2x_netif_stop(bp, 1);
11299         bnx2x_acquire_phy_lock(bp);
11300
11301         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11302         if (res) {
11303                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11304                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11305         }
11306
11307         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11308         if (res) {
11309                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11310                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11311         }
11312
11313         bnx2x_release_phy_lock(bp);
11314         bnx2x_netif_start(bp);
11315
11316         return rc;
11317 }
11318
11319 #define CRC32_RESIDUAL                  0xdebb20e3
11320
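/* Each NVRAM region below carries its own CRC32, so running the CRC over
 * the region data including the stored CRC must yield the well-known
 * CRC-32 residual 0xdebb20e3; a simple equality check thus validates a
 * whole region at once.
 */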
11321 static int bnx2x_test_nvram(struct bnx2x *bp)
11322 {
11323         static const struct {
11324                 int offset;
11325                 int size;
11326         } nvram_tbl[] = {
11327                 {     0,  0x14 }, /* bootstrap */
11328                 {  0x14,  0xec }, /* dir */
11329                 { 0x100, 0x350 }, /* manuf_info */
11330                 { 0x450,  0xf0 }, /* feature_info */
11331                 { 0x640,  0x64 }, /* upgrade_key_info */
11332                 { 0x6a4,  0x64 },
11333                 { 0x708,  0x70 }, /* manuf_key_info */
11334                 { 0x778,  0x70 },
11335                 {     0,     0 }
11336         };
11337         __be32 buf[0x350 / 4];
11338         u8 *data = (u8 *)buf;
11339         int i, rc;
11340         u32 magic, crc;
11341
11342         rc = bnx2x_nvram_read(bp, 0, data, 4);
11343         if (rc) {
11344                 DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
11345                 goto test_nvram_exit;
11346         }
11347
11348         magic = be32_to_cpu(buf[0]);
11349         if (magic != 0x669955aa) {
11350                 DP(NETIF_MSG_PROBE, "wrong magic value (0x%08x)\n", magic);
11351                 rc = -ENODEV;
11352                 goto test_nvram_exit;
11353         }
11354
11355         for (i = 0; nvram_tbl[i].size; i++) {
11356
11357                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11358                                       nvram_tbl[i].size);
11359                 if (rc) {
11360                         DP(NETIF_MSG_PROBE,
11361                            "nvram_tbl[%d] data read failed (rc %d)\n", i, rc);
11362                         goto test_nvram_exit;
11363                 }
11364
11365                 crc = ether_crc_le(nvram_tbl[i].size, data);
11366                 if (crc != CRC32_RESIDUAL) {
11367                         DP(NETIF_MSG_PROBE,
11368                            "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc);
11369                         rc = -ENODEV;
11370                         goto test_nvram_exit;
11371                 }
11372         }
11373
11374 test_nvram_exit:
11375         return rc;
11376 }
11377
11378 static int bnx2x_test_intr(struct bnx2x *bp)
11379 {
11380         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11381         int i, rc;
11382
11383         if (!netif_running(bp->dev))
11384                 return -ENODEV;
11385
11386         config->hdr.length = 0;
11387         if (CHIP_IS_E1(bp))
11388                 /* use last unicast entries */
11389                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11390         else
11391                 config->hdr.offset = BP_FUNC(bp);
11392         config->hdr.client_id = bp->fp->cl_id;
11393         config->hdr.reserved1 = 0;
11394
11395         bp->set_mac_pending++;
11396         smp_wmb();
11397         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11398                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11399                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11400         if (rc == 0) {
11401                 for (i = 0; i < 10; i++) {
11402                         if (!bp->set_mac_pending)
11403                                 break;
11404                         smp_rmb();
11405                         msleep_interruptible(10);
11406                 }
11407                 if (i == 10)
11408                         rc = -ENODEV;
11409         }
11410
11411         return rc;
11412 }
11413
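/* ETHTOOL_TEST entry point.  The offline part quiesces the Tx port IF,
 * reloads the NIC in diagnostic mode (LOAD_DIAG) for the register, memory
 * and loopback tests, then restores normal operation; results land in
 * buf[] in the order of bnx2x_tests_str_arr.
 */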
11414 static void bnx2x_self_test(struct net_device *dev,
11415                             struct ethtool_test *etest, u64 *buf)
11416 {
11417         struct bnx2x *bp = netdev_priv(dev);
11418
11419         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11420                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11421                 etest->flags |= ETH_TEST_FL_FAILED;
11422                 return;
11423         }
11424
11425         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11426
11427         if (!netif_running(dev))
11428                 return;
11429
11430         /* offline tests are not supported in MF mode */
11431         if (IS_E1HMF(bp))
11432                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11433
11434         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11435                 int port = BP_PORT(bp);
11436                 u32 val;
11437                 u8 link_up;
11438
11439                 /* save current value of input enable for TX port IF */
11440                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11441                 /* disable input for TX port IF */
11442                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11443
11444                 link_up = (bnx2x_link_test(bp) == 0);
11445                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11446                 bnx2x_nic_load(bp, LOAD_DIAG);
11447                 /* wait until link state is restored */
11448                 bnx2x_wait_for_link(bp, link_up);
11449
11450                 if (bnx2x_test_registers(bp) != 0) {
11451                         buf[0] = 1;
11452                         etest->flags |= ETH_TEST_FL_FAILED;
11453                 }
11454                 if (bnx2x_test_memory(bp) != 0) {
11455                         buf[1] = 1;
11456                         etest->flags |= ETH_TEST_FL_FAILED;
11457                 }
11458                 buf[2] = bnx2x_test_loopback(bp, link_up);
11459                 if (buf[2] != 0)
11460                         etest->flags |= ETH_TEST_FL_FAILED;
11461
11462                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11463
11464                 /* restore input for TX port IF */
11465                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11466
11467                 bnx2x_nic_load(bp, LOAD_NORMAL);
11468                 /* wait until link state is restored */
11469                 bnx2x_wait_for_link(bp, link_up);
11470         }
11471         if (bnx2x_test_nvram(bp) != 0) {
11472                 buf[3] = 1;
11473                 etest->flags |= ETH_TEST_FL_FAILED;
11474         }
11475         if (bnx2x_test_intr(bp) != 0) {
11476                 buf[4] = 1;
11477                 etest->flags |= ETH_TEST_FL_FAILED;
11478         }
11479         if (bp->port.pmf)
11480                 if (bnx2x_link_test(bp) != 0) {
11481                         buf[5] = 1;
11482                         etest->flags |= ETH_TEST_FL_FAILED;
11483                 }
11484
11485 #ifdef BNX2X_EXTRA_DEBUG
11486         bnx2x_panic_dump(bp);
11487 #endif
11488 }
11489
11490 static const struct {
11491         long offset;
11492         int size;
11493         u8 string[ETH_GSTRING_LEN];
11494 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11495 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11496         { Q_STATS_OFFSET32(error_bytes_received_hi),
11497                                                 8, "[%d]: rx_error_bytes" },
11498         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11499                                                 8, "[%d]: rx_ucast_packets" },
11500         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11501                                                 8, "[%d]: rx_mcast_packets" },
11502         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11503                                                 8, "[%d]: rx_bcast_packets" },
11504         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11505         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11506                                          4, "[%d]: rx_phy_ip_err_discards"},
11507         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11508                                          4, "[%d]: rx_skb_alloc_discard" },
11509         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11510
11511 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11512         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11513                                                         8, "[%d]: tx_packets" }
11514 };
11515
11516 static const struct {
11517         long offset;
11518         int size;
11519         u32 flags;
11520 #define STATS_FLAGS_PORT                1
11521 #define STATS_FLAGS_FUNC                2
11522 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11523         u8 string[ETH_GSTRING_LEN];
11524 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11525 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11526                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11527         { STATS_OFFSET32(error_bytes_received_hi),
11528                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11529         { STATS_OFFSET32(total_unicast_packets_received_hi),
11530                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11531         { STATS_OFFSET32(total_multicast_packets_received_hi),
11532                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11533         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11534                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11535         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11536                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11537         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11538                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11539         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11540                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11541         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11542                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11543 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11544                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11545         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11546                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11547         { STATS_OFFSET32(no_buff_discard_hi),
11548                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11549         { STATS_OFFSET32(mac_filter_discard),
11550                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11551         { STATS_OFFSET32(xxoverflow_discard),
11552                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11553         { STATS_OFFSET32(brb_drop_hi),
11554                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11555         { STATS_OFFSET32(brb_truncate_hi),
11556                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11557         { STATS_OFFSET32(pause_frames_received_hi),
11558                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11559         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11560                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11561         { STATS_OFFSET32(nig_timer_max),
11562                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11563 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11564                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11565         { STATS_OFFSET32(rx_skb_alloc_failed),
11566                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11567         { STATS_OFFSET32(hw_csum_err),
11568                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11569
11570         { STATS_OFFSET32(total_bytes_transmitted_hi),
11571                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11572         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11573                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11574         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11575                                 8, STATS_FLAGS_BOTH, "tx_packets" },
11576         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11577                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11578         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11579                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11580         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11581                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11582         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11583                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11584 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11585                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11586         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11587                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11588         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11589                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11590         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11591                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11592         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11593                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11594         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11595                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11596         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11597                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11598         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11599                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11600         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11601                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11602         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11603                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11604 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
11605                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11606         { STATS_OFFSET32(pause_frames_sent_hi),
11607                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11608 };
11609
11610 #define IS_PORT_STAT(i) \
11611         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11612 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11613 #define IS_E1HMF_MODE_STAT(bp) \
11614                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11615
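/* "ethtool -S eth0" drives the three handlers below: bnx2x_get_sset_count()
 * sizes the result arrays, bnx2x_get_strings() names the counters and
 * bnx2x_get_ethtool_stats() fills in the values (per-queue stats first when
 * multi-queue is enabled, then the global ones).
 */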
11616 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11617 {
11618         struct bnx2x *bp = netdev_priv(dev);
11619         int i, num_stats;
11620
11621         switch (stringset) {
11622         case ETH_SS_STATS:
11623                 if (is_multi(bp)) {
11624                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11625                         if (!IS_E1HMF_MODE_STAT(bp))
11626                                 num_stats += BNX2X_NUM_STATS;
11627                 } else {
11628                         if (IS_E1HMF_MODE_STAT(bp)) {
11629                                 num_stats = 0;
11630                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11631                                         if (IS_FUNC_STAT(i))
11632                                                 num_stats++;
11633                         } else
11634                                 num_stats = BNX2X_NUM_STATS;
11635                 }
11636                 return num_stats;
11637
11638         case ETH_SS_TEST:
11639                 return BNX2X_NUM_TESTS;
11640
11641         default:
11642                 return -EINVAL;
11643         }
11644 }
11645
11646 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11647 {
11648         struct bnx2x *bp = netdev_priv(dev);
11649         int i, j, k;
11650
11651         switch (stringset) {
11652         case ETH_SS_STATS:
11653                 if (is_multi(bp)) {
11654                         k = 0;
11655                         for_each_queue(bp, i) {
11656                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11657                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11658                                                 bnx2x_q_stats_arr[j].string, i);
11659                                 k += BNX2X_NUM_Q_STATS;
11660                         }
11661                         if (IS_E1HMF_MODE_STAT(bp))
11662                                 break;
11663                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11664                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11665                                        bnx2x_stats_arr[j].string);
11666                 } else {
11667                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11668                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11669                                         continue;
11670                                 strcpy(buf + j*ETH_GSTRING_LEN,
11671                                        bnx2x_stats_arr[i].string);
11672                                 j++;
11673                         }
11674                 }
11675                 break;
11676
11677         case ETH_SS_TEST:
11678                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11679                 break;
11680         }
11681 }
11682
11683 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11684                                     struct ethtool_stats *stats, u64 *buf)
11685 {
11686         struct bnx2x *bp = netdev_priv(dev);
11687         u32 *hw_stats, *offset;
11688         int i, j, k;
11689
11690         if (is_multi(bp)) {
11691                 k = 0;
11692                 for_each_queue(bp, i) {
11693                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11694                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11695                                 if (bnx2x_q_stats_arr[j].size == 0) {
11696                                         /* skip this counter */
11697                                         buf[k + j] = 0;
11698                                         continue;
11699                                 }
11700                                 offset = (hw_stats +
11701                                           bnx2x_q_stats_arr[j].offset);
11702                                 if (bnx2x_q_stats_arr[j].size == 4) {
11703                                         /* 4-byte counter */
11704                                         buf[k + j] = (u64) *offset;
11705                                         continue;
11706                                 }
11707                                 /* 8-byte counter */
11708                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11709                         }
11710                         k += BNX2X_NUM_Q_STATS;
11711                 }
11712                 if (IS_E1HMF_MODE_STAT(bp))
11713                         return;
11714                 hw_stats = (u32 *)&bp->eth_stats;
11715                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11716                         if (bnx2x_stats_arr[j].size == 0) {
11717                                 /* skip this counter */
11718                                 buf[k + j] = 0;
11719                                 continue;
11720                         }
11721                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11722                         if (bnx2x_stats_arr[j].size == 4) {
11723                                 /* 4-byte counter */
11724                                 buf[k + j] = (u64) *offset;
11725                                 continue;
11726                         }
11727                         /* 8-byte counter */
11728                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11729                 }
11730         } else {
11731                 hw_stats = (u32 *)&bp->eth_stats;
11732                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11733                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11734                                 continue;
11735                         if (bnx2x_stats_arr[i].size == 0) {
11736                                 /* skip this counter */
11737                                 buf[j] = 0;
11738                                 j++;
11739                                 continue;
11740                         }
11741                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11742                         if (bnx2x_stats_arr[i].size == 4) {
11743                                 /* 4-byte counter */
11744                                 buf[j] = (u64) *offset;
11745                                 j++;
11746                                 continue;
11747                         }
11748                         /* 8-byte counter */
11749                         buf[j] = HILO_U64(*offset, *(offset + 1));
11750                         j++;
11751                 }
11752         }
11753 }
11754
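/* "ethtool -p eth0 <N>" identify support: blink the port LED for N seconds
 * (500 ms on / 500 ms off), treating N == 0 as 2 seconds, then restore the
 * LED to match the current link state.
 */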
11755 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11756 {
11757         struct bnx2x *bp = netdev_priv(dev);
11758         int i;
11759
11760         if (!netif_running(dev))
11761                 return 0;
11762
11763         if (!bp->port.pmf)
11764                 return 0;
11765
11766         if (data == 0)
11767                 data = 2;
11768
11769         for (i = 0; i < (data * 2); i++) {
11770                 if ((i % 2) == 0)
11771                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11772                                       SPEED_1000);
11773                 else
11774                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11775
11776                 msleep_interruptible(500);
11777                 if (signal_pending(current))
11778                         break;
11779         }
11780
11781         if (bp->link_vars.link_up)
11782                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11783                               bp->link_vars.line_speed);
11784
11785         return 0;
11786 }
11787
11788 static const struct ethtool_ops bnx2x_ethtool_ops = {
11789         .get_settings           = bnx2x_get_settings,
11790         .set_settings           = bnx2x_set_settings,
11791         .get_drvinfo            = bnx2x_get_drvinfo,
11792         .get_regs_len           = bnx2x_get_regs_len,
11793         .get_regs               = bnx2x_get_regs,
11794         .get_wol                = bnx2x_get_wol,
11795         .set_wol                = bnx2x_set_wol,
11796         .get_msglevel           = bnx2x_get_msglevel,
11797         .set_msglevel           = bnx2x_set_msglevel,
11798         .nway_reset             = bnx2x_nway_reset,
11799         .get_link               = bnx2x_get_link,
11800         .get_eeprom_len         = bnx2x_get_eeprom_len,
11801         .get_eeprom             = bnx2x_get_eeprom,
11802         .set_eeprom             = bnx2x_set_eeprom,
11803         .get_coalesce           = bnx2x_get_coalesce,
11804         .set_coalesce           = bnx2x_set_coalesce,
11805         .get_ringparam          = bnx2x_get_ringparam,
11806         .set_ringparam          = bnx2x_set_ringparam,
11807         .get_pauseparam         = bnx2x_get_pauseparam,
11808         .set_pauseparam         = bnx2x_set_pauseparam,
11809         .get_rx_csum            = bnx2x_get_rx_csum,
11810         .set_rx_csum            = bnx2x_set_rx_csum,
11811         .get_tx_csum            = ethtool_op_get_tx_csum,
11812         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11813         .set_flags              = bnx2x_set_flags,
11814         .get_flags              = ethtool_op_get_flags,
11815         .get_sg                 = ethtool_op_get_sg,
11816         .set_sg                 = ethtool_op_set_sg,
11817         .get_tso                = ethtool_op_get_tso,
11818         .set_tso                = bnx2x_set_tso,
11819         .self_test              = bnx2x_self_test,
11820         .get_sset_count         = bnx2x_get_sset_count,
11821         .get_strings            = bnx2x_get_strings,
11822         .phys_id                = bnx2x_phys_id,
11823         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11824 };
11825
11826 /* end of ethtool_ops */
11827
11828 /****************************************************************************
11829 * General service functions
11830 ****************************************************************************/
11831
11832 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11833 {
11834         u16 pmcsr;
11835
11836         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11837
11838         switch (state) {
11839         case PCI_D0:
11840                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11841                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11842                                        PCI_PM_CTRL_PME_STATUS));
11843
11844                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11845                         /* delay required during transition out of D3hot */
11846                         msleep(20);
11847                 break;
11848
11849         case PCI_D3hot:
11850                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11851                 pmcsr |= 3;
11852
11853                 if (bp->wol)
11854                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11855
11856                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11857                                       pmcsr);
11858
11859                 /* No more memory access after this point until
11860                  * the device is brought back to D0.
11861                  */
11862                 break;
11863
11864         default:
11865                 return -EINVAL;
11866         }
11867         return 0;
11868 }
11869
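/* Presumably the last entry of each RCQ page is reserved as a next-page
 * link (cf. NEXT_RCQ_IDX), which is why a consumer index landing on
 * MAX_RCQ_DESC_CNT is bumped past it before the comparison below.
 */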
11870 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11871 {
11872         u16 rx_cons_sb;
11873
11874         /* Tell compiler that status block fields can change */
11875         barrier();
11876         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11877         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11878                 rx_cons_sb++;
11879         return (fp->rx_comp_cons != rx_cons_sb);
11880 }
11881
11882 /*
11883  * net_device service functions
11884  */
11885
11886 static int bnx2x_poll(struct napi_struct *napi, int budget)
11887 {
11888         int work_done = 0;
11889         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11890                                                  napi);
11891         struct bnx2x *bp = fp->bp;
11892
11893         while (1) {
11894 #ifdef BNX2X_STOP_ON_ERROR
11895                 if (unlikely(bp->panic)) {
11896                         napi_complete(napi);
11897                         return 0;
11898                 }
11899 #endif
11900
11901                 if (bnx2x_has_tx_work(fp))
11902                         bnx2x_tx_int(fp);
11903
11904                 if (bnx2x_has_rx_work(fp)) {
11905                         work_done += bnx2x_rx_int(fp, budget - work_done);
11906
11907                         /* must not complete if we consumed full budget */
11908                         if (work_done >= budget)
11909                                 break;
11910                 }
11911
11912                 /* Fall out from the NAPI loop if needed */
11913                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11914                         bnx2x_update_fpsb_idx(fp);
11915                 /* bnx2x_has_rx_work() reads the status block, thus we need
11916                  * to ensure that status block indices have been actually read
11917                  * (bnx2x_update_fpsb_idx) prior to this check
11918                  * (bnx2x_has_rx_work) so that we won't write the "newer"
11919                  * value of the status block to IGU (if there was a DMA right
11920                  * after bnx2x_has_rx_work and if there is no rmb, the memory
11921                  * reading (bnx2x_update_fpsb_idx) may be postponed to right
11922                  * before bnx2x_ack_sb). In this case there will never be
11923                  * another interrupt until there is another update of the
11924                  * status block, while there is still unhandled work.
11925                  */
11926                         rmb();
11927
11928                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11929                                 napi_complete(napi);
11930                                 /* Re-enable interrupts */
11931                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11932                                              le16_to_cpu(fp->fp_c_idx),
11933                                              IGU_INT_NOP, 1);
11934                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11935                                              le16_to_cpu(fp->fp_u_idx),
11936                                              IGU_INT_ENABLE, 1);
11937                                 break;
11938                         }
11939                 }
11940         }
11941
11942         return work_done;
11943 }
11944
11945
11946 /* We split the first BD into header and data BDs
11947  * to ease the pain of our fellow microcode engineers;
11948  * we use one mapping for both BDs.
11949  * So far this has only been observed to happen
11950  * in Other Operating Systems(TM).
11951  */
11952 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11953                                    struct bnx2x_fastpath *fp,
11954                                    struct sw_tx_bd *tx_buf,
11955                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
11956                                    u16 bd_prod, int nbd)
11957 {
11958         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11959         struct eth_tx_bd *d_tx_bd;
11960         dma_addr_t mapping;
11961         int old_len = le16_to_cpu(h_tx_bd->nbytes);
11962
11963         /* first fix the first BD */
11964         h_tx_bd->nbd = cpu_to_le16(nbd);
11965         h_tx_bd->nbytes = cpu_to_le16(hlen);
11966
11967         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11968            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11969            h_tx_bd->addr_lo, h_tx_bd->nbd);
11970
11971         /* now get a new data BD
11972          * (after the pbd) and fill it */
11973         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11974         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11975
11976         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11977                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11978
11979         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11980         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11981         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11982
11983         /* this marks the BD as one that has no individual mapping */
11984         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11985
11986         DP(NETIF_MSG_TX_QUEUED,
11987            "TSO split data size is %d (%x:%x)\n",
11988            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11989
11990         /* update tx_bd */
11991         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11992
11993         return bd_prod;
11994 }
11995
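/* Adjust a checksum whose coverage started "fix" bytes before (fix > 0) or
 * after (fix < 0) the transport header: subtract or add the partial
 * checksum of the offending span, then byte-swap for the parsing BD.
 */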
11996 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11997 {
11998         if (fix > 0)
11999                 csum = (u16) ~csum_fold(csum_sub(csum,
12000                                 csum_partial(t_header - fix, fix, 0)));
12001
12002         else if (fix < 0)
12003                 csum = (u16) ~csum_fold(csum_add(csum,
12004                                 csum_partial(t_header, -fix, 0)));
12005
12006         return swab16(csum);
12007 }
12008
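/* Classify an outgoing skb into XMIT_* flags (plain, IPv4/IPv6 checksum,
 * TCP, GSO) so the xmit path can decide how to fill the parsing BD and
 * whether TSO handling is needed.
 */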
12009 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12010 {
12011         u32 rc;
12012
12013         if (skb->ip_summed != CHECKSUM_PARTIAL)
12014                 rc = XMIT_PLAIN;
12015
12016         else {
12017                 if (skb->protocol == htons(ETH_P_IPV6)) {
12018                         rc = XMIT_CSUM_V6;
12019                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12020                                 rc |= XMIT_CSUM_TCP;
12021
12022                 } else {
12023                         rc = XMIT_CSUM_V4;
12024                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12025                                 rc |= XMIT_CSUM_TCP;
12026                 }
12027         }
12028
12029         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12030                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12031
12032         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12033                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12034
12035         return rc;
12036 }
12037
12038 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12039 /* Check if the packet requires linearization (i.e. it is too fragmented).
12040    No need to check fragmentation if the page size > 8K (there will be no
12041    violation of the FW restrictions) */
12042 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12043                              u32 xmit_type)
12044 {
12045         int to_copy = 0;
12046         int hlen = 0;
12047         int first_bd_sz = 0;
12048
12049         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12050         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12051
12052                 if (xmit_type & XMIT_GSO) {
12053                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12054                         /* Check if LSO packet needs to be copied:
12055                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12056                         int wnd_size = MAX_FETCH_BD - 3;
12057                         /* Number of windows to check */
12058                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12059                         int wnd_idx = 0;
12060                         int frag_idx = 0;
12061                         u32 wnd_sum = 0;
12062
12063                         /* Headers length */
12064                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12065                                 tcp_hdrlen(skb);
12066
12067                         /* Amount of data (w/o headers) in the linear part of the SKB */
12068                         first_bd_sz = skb_headlen(skb) - hlen;
12069
12070                         wnd_sum  = first_bd_sz;
12071
12072                         /* Calculate the first sum - it's special */
12073                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12074                                 wnd_sum +=
12075                                         skb_shinfo(skb)->frags[frag_idx].size;
12076
12077                         /* If there was data in the linear part - check it */
12078                         if (first_bd_sz > 0) {
12079                                 if (unlikely(wnd_sum < lso_mss)) {
12080                                         to_copy = 1;
12081                                         goto exit_lbl;
12082                                 }
12083
12084                                 wnd_sum -= first_bd_sz;
12085                         }
12086
12087                         /* Others are easier: run through the frag list and
12088                            check all windows */
12089                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12090                                 wnd_sum +=
12091                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12092
12093                                 if (unlikely(wnd_sum < lso_mss)) {
12094                                         to_copy = 1;
12095                                         break;
12096                                 }
12097                                 wnd_sum -=
12098                                         skb_shinfo(skb)->frags[wnd_idx].size;
12099                         }
12100                 } else {
12101                         /* a non-LSO packet that is too fragmented must
12102                            always be linearized */
12103                         to_copy = 1;
12104                 }
12105         }
12106
12107 exit_lbl:
12108         if (unlikely(to_copy))
12109                 DP(NETIF_MSG_TX_QUEUED,
12110                    "Linearization IS REQUIRED for %s packet. "
12111                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12112                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12113                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12114
12115         return to_copy;
12116 }
12117 #endif
12118
12119 /* called with netif_tx_lock
12120  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12121  * netif_wake_queue()
12122  */
12123 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12124 {
12125         struct bnx2x *bp = netdev_priv(dev);
12126         struct bnx2x_fastpath *fp;
12127         struct netdev_queue *txq;
12128         struct sw_tx_bd *tx_buf;
12129         struct eth_tx_start_bd *tx_start_bd;
12130         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12131         struct eth_tx_parse_bd *pbd = NULL;
12132         u16 pkt_prod, bd_prod;
12133         int nbd, fp_index;
12134         dma_addr_t mapping;
12135         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12136         int i;
12137         u8 hlen = 0;
12138         __le16 pkt_size = 0;
12139
12140 #ifdef BNX2X_STOP_ON_ERROR
12141         if (unlikely(bp->panic))
12142                 return NETDEV_TX_BUSY;
12143 #endif
12144
12145         fp_index = skb_get_queue_mapping(skb);
12146         txq = netdev_get_tx_queue(dev, fp_index);
12147
12148         fp = &bp->fp[fp_index];
12149
12150         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12151                 fp->eth_q_stats.driver_xoff++;
12152                 netif_tx_stop_queue(txq);
12153                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12154                 return NETDEV_TX_BUSY;
12155         }
12156
12157         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12158            "  gso type %x  xmit_type %x\n",
12159            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12160            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12161
12162 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12163         /* First, check if we need to linearize the skb (due to FW
12164            restrictions). No need to check fragmentation if page size > 8K
12165            (there will be no violation of FW restrictions) */
12166         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12167                 /* Statistics of linearization */
12168                 bp->lin_cnt++;
12169                 if (skb_linearize(skb) != 0) {
12170                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12171                            "silently dropping this SKB\n");
12172                         dev_kfree_skb_any(skb);
12173                         return NETDEV_TX_OK;
12174                 }
12175         }
12176 #endif
12177
12178         /*
12179         Please read carefully. First we use one BD which we mark as start,
12180         then we have a parsing info BD (used for TSO or xsum),
12181         and only then we have the rest of the TSO BDs.
12182         (don't forget to mark the last one as last,
12183         and to unmap only AFTER you write to the BD ...)
12184         And above all, all PBD sizes are in words - NOT DWORDS!
12185         */
12186
12187         pkt_prod = fp->tx_pkt_prod++;
12188         bd_prod = TX_BD(fp->tx_bd_prod);
12189
12190         /* get a tx_buf and first BD */
12191         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12192         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12193
12194         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12195         tx_start_bd->general_data = (UNICAST_ADDRESS <<
12196                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12197         /* header nbd */
12198         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12199
12200         /* remember the first BD of the packet */
12201         tx_buf->first_bd = fp->tx_bd_prod;
12202         tx_buf->skb = skb;
12203         tx_buf->flags = 0;
12204
12205         DP(NETIF_MSG_TX_QUEUED,
12206            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12207            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12208
12209 #ifdef BCM_VLAN
12210         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12211             (bp->flags & HW_VLAN_TX_FLAG)) {
12212                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12213                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12214         } else
12215 #endif
12216                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12217
12218         /* turn on parsing and get a BD */
12219         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12220         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12221
12222         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12223
12224         if (xmit_type & XMIT_CSUM) {
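                /* header lengths handed to the parsing BD are in 16-bit
                 * words (hence the /2); hlen is converted back to bytes
                 * below, once total_hlen has been stored.
                 */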
12225                 hlen = (skb_network_header(skb) - skb->data) / 2;
12226
12227                 /* for now NS flag is not used in Linux */
12228                 pbd->global_data =
12229                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12230                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12231
12232                 pbd->ip_hlen = (skb_transport_header(skb) -
12233                                 skb_network_header(skb)) / 2;
12234
12235                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12236
12237                 pbd->total_hlen = cpu_to_le16(hlen);
12238                 hlen = hlen*2;
12239
12240                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12241
12242                 if (xmit_type & XMIT_CSUM_V4)
12243                         tx_start_bd->bd_flags.as_bitfield |=
12244                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12245                 else
12246                         tx_start_bd->bd_flags.as_bitfield |=
12247                                                 ETH_TX_BD_FLAGS_IPV6;
12248
12249                 if (xmit_type & XMIT_CSUM_TCP) {
12250                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12251
12252                 } else {
12253                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12254
12255                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12256
12257                         DP(NETIF_MSG_TX_QUEUED,
12258                            "hlen %d  fix %d  csum before fix %x\n",
12259                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12260
12261                         /* HW bug: fixup the CSUM */
12262                         pbd->tcp_pseudo_csum =
12263                                 bnx2x_csum_fix(skb_transport_header(skb),
12264                                                SKB_CS(skb), fix);
12265
12266                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12267                            pbd->tcp_pseudo_csum);
12268                 }
12269         }
12270
12271         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12272                                  skb_headlen(skb), DMA_TO_DEVICE);
12273
12274         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12275         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12276         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12277         tx_start_bd->nbd = cpu_to_le16(nbd);
12278         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12279         pkt_size = tx_start_bd->nbytes;
12280
12281         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12282            "  nbytes %d  flags %x  vlan %x\n",
12283            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12284            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12285            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12286
12287         if (xmit_type & XMIT_GSO) {
12288
12289                 DP(NETIF_MSG_TX_QUEUED,
12290                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12291                    skb->len, hlen, skb_headlen(skb),
12292                    skb_shinfo(skb)->gso_size);
12293
12294                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12295
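                /* if the linear part carries payload beyond the headers,
                 * split it so the headers get a BD of their own (see
                 * bnx2x_tx_split()), counting the extra BD via ++nbd
                 */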
12296                 if (unlikely(skb_headlen(skb) > hlen))
12297                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12298                                                  hlen, bd_prod, ++nbd);
12299
12300                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12301                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12302                 pbd->tcp_flags = pbd_tcp_flags(skb);
12303
12304                 if (xmit_type & XMIT_GSO_V4) {
12305                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12306                         pbd->tcp_pseudo_csum =
12307                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12308                                                           ip_hdr(skb)->daddr,
12309                                                           0, IPPROTO_TCP, 0));
12310
12311                 } else
12312                         pbd->tcp_pseudo_csum =
12313                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12314                                                         &ipv6_hdr(skb)->daddr,
12315                                                         0, IPPROTO_TCP, 0));
12316
12317                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12318         }
12319         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12320
12321         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12322                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12323
12324                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12325                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12326                 if (total_pkt_bd == NULL)
12327                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12328
12329                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12330                                        frag->page_offset,
12331                                        frag->size, DMA_TO_DEVICE);
12332
12333                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12334                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12335                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12336                 le16_add_cpu(&pkt_size, frag->size);
12337
12338                 DP(NETIF_MSG_TX_QUEUED,
12339                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12340                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12341                    le16_to_cpu(tx_data_bd->nbytes));
12342         }
12343
12344         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12345
12346         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12347
12348         /* now send a tx doorbell, counting the next-page pointer BD
12349          * if the packet contains or ends on one
12350          */
12351         if (TX_BD_POFF(bd_prod) < nbd)
12352                 nbd++;
12353
12354         if (total_pkt_bd != NULL)
12355                 total_pkt_bd->total_pkt_bytes = pkt_size;
12356
12357         if (pbd)
12358                 DP(NETIF_MSG_TX_QUEUED,
12359                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12360                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12361                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12362                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12363                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12364
12365         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12366
12367         /*
12368          * Make sure that the BD data is updated before updating the producer
12369          * since FW might read the BD right after the producer is updated.
12370          * This is only applicable for weak-ordered memory model archs such
12371          * as IA-64. The following barrier is also mandatory since the FW
12372          * assumes that packets always have BDs.
12373          */
12374         wmb();
12375
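        /* bump the driver's doorbell copy of the producer, keep the
         * compiler from reordering past it with barrier(), then ring the
         * doorbell (fp->tx_db.raw aliases the just-updated producer).
         */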
12376         fp->tx_db.data.prod += nbd;
12377         barrier();
12378         DOORBELL(bp, fp->index, fp->tx_db.raw);
12379
12380         mmiowb();
12381
12382         fp->tx_bd_prod += nbd;
12383
12384         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12385                 netif_tx_stop_queue(txq);
12386
12387                 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12388                  * ordering of set_bit() in netif_tx_stop_queue() and read of
12389                  * fp->tx_bd_cons */
12390                 smp_mb();
12391
12392                 fp->eth_q_stats.driver_xoff++;
12393                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12394                         netif_tx_wake_queue(txq);
12395         }
12396         fp->tx_pkt++;
12397
12398         return NETDEV_TX_OK;
12399 }
12400
12401 /* called with rtnl_lock */
12402 static int bnx2x_open(struct net_device *dev)
12403 {
12404         struct bnx2x *bp = netdev_priv(dev);
12405
12406         netif_carrier_off(dev);
12407
12408         bnx2x_set_power_state(bp, PCI_D0);
12409
12410         if (!bnx2x_reset_is_done(bp)) {
12411                 do {
12412                         /* Reset the MCP mailbox sequence if there is an
12413                          * ongoing recovery
12414                          */
12415                         bp->fw_seq = 0;
12416
12417                         /* If this is the first function to load and reset
12418                          * done is still not cleared, the previous recovery
12419                          * may not have completed. We don't check the
12420                          * attention state here since a "common" reset may
12421                          * already have cleared it, but we shall proceed
12422                          * with "process kill" anyway. */
12423                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12424                                 bnx2x_trylock_hw_lock(bp,
12425                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12426                                 (!bnx2x_leader_reset(bp))) {
12427                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12428                                 break;
12429                         }
12430
12431                         bnx2x_set_power_state(bp, PCI_D3hot);
12432
12433                         printk(KERN_ERR "%s: Recovery flow hasn't been properly"
12434                         " completed yet. Try again later. If you still see this"
12435                         " message after a few retries then a power cycle is"
12436                         " required.\n", bp->dev->name);
12437
12438                         return -EAGAIN;
12439                 } while (0);
12440         }
12441
12442         bp->recovery_state = BNX2X_RECOVERY_DONE;
12443
12444         return bnx2x_nic_load(bp, LOAD_OPEN);
12445 }
12446
12447 /* called with rtnl_lock */
12448 static int bnx2x_close(struct net_device *dev)
12449 {
12450         struct bnx2x *bp = netdev_priv(dev);
12451
12452         /* Unload the driver, release IRQs */
12453         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12454         if (atomic_read(&bp->pdev->enable_cnt) == 1)
12455                 if (!CHIP_REV_IS_SLOW(bp))
12456                         bnx2x_set_power_state(bp, PCI_D3hot);
12457
12458         return 0;
12459 }
12460
12461 /* called with netif_tx_lock from dev_mcast.c */
12462 static void bnx2x_set_rx_mode(struct net_device *dev)
12463 {
12464         struct bnx2x *bp = netdev_priv(dev);
12465         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12466         int port = BP_PORT(bp);
12467
12468         if (bp->state != BNX2X_STATE_OPEN) {
12469                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12470                 return;
12471         }
12472
12473         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12474
12475         if (dev->flags & IFF_PROMISC)
12476                 rx_mode = BNX2X_RX_MODE_PROMISC;
12477
12478         else if ((dev->flags & IFF_ALLMULTI) ||
12479                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12480                   CHIP_IS_E1(bp)))
12481                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12482
12483         else { /* some multicasts */
12484                 if (CHIP_IS_E1(bp)) {
12485                         int i, old, offset;
12486                         struct netdev_hw_addr *ha;
12487                         struct mac_configuration_cmd *config =
12488                                                 bnx2x_sp(bp, mcast_config);
12489
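                        /* E1: program each multicast MAC into a CAM entry;
                         * stale entries from a previously longer list are
                         * invalidated further down.
                         */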
12490                         i = 0;
12491                         netdev_for_each_mc_addr(ha, dev) {
12492                                 config->config_table[i].
12493                                         cam_entry.msb_mac_addr =
12494                                         swab16(*(u16 *)&ha->addr[0]);
12495                                 config->config_table[i].
12496                                         cam_entry.middle_mac_addr =
12497                                         swab16(*(u16 *)&ha->addr[2]);
12498                                 config->config_table[i].
12499                                         cam_entry.lsb_mac_addr =
12500                                         swab16(*(u16 *)&ha->addr[4]);
12501                                 config->config_table[i].cam_entry.flags =
12502                                                         cpu_to_le16(port);
12503                                 config->config_table[i].
12504                                         target_table_entry.flags = 0;
12505                                 config->config_table[i].target_table_entry.
12506                                         clients_bit_vector =
12507                                                 cpu_to_le32(1 << BP_L_ID(bp));
12508                                 config->config_table[i].
12509                                         target_table_entry.vlan_id = 0;
12510
12511                                 DP(NETIF_MSG_IFUP,
12512                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12513                                    config->config_table[i].
12514                                                 cam_entry.msb_mac_addr,
12515                                    config->config_table[i].
12516                                                 cam_entry.middle_mac_addr,
12517                                    config->config_table[i].
12518                                                 cam_entry.lsb_mac_addr);
12519                                 i++;
12520                         }
12521                         old = config->hdr.length;
12522                         if (old > i) {
12523                                 for (; i < old; i++) {
12524                                         if (CAM_IS_INVALID(config->
12525                                                            config_table[i])) {
12526                                                 /* already invalidated */
12527                                                 break;
12528                                         }
12529                                         /* invalidate */
12530                                         CAM_INVALIDATE(config->
12531                                                        config_table[i]);
12532                                 }
12533                         }
12534
12535                         if (CHIP_REV_IS_SLOW(bp))
12536                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12537                         else
12538                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12539
12540                         config->hdr.length = i;
12541                         config->hdr.offset = offset;
12542                         config->hdr.client_id = bp->fp->cl_id;
12543                         config->hdr.reserved1 = 0;
12544
12545                         bp->set_mac_pending++;
12546                         smp_wmb();
12547
12548                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12549                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12550                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12551                                       0);
12552                 } else { /* E1H */
12553                         /* Accept one or more multicasts */
12554                         struct netdev_hw_addr *ha;
12555                         u32 mc_filter[MC_HASH_SIZE];
12556                         u32 crc, bit, regidx;
12557                         int i;
12558
12559                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12560
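                        /* hash each multicast MAC into a 256-bit filter:
                         * the top byte of the crc32c selects one of 256
                         * bits, split into a register index (bit >> 5) and
                         * a bit offset within it (bit & 0x1f)
                         */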
12561                         netdev_for_each_mc_addr(ha, dev) {
12562                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12563                                    ha->addr);
12564
12565                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12566                                 bit = (crc >> 24) & 0xff;
12567                                 regidx = bit >> 5;
12568                                 bit &= 0x1f;
12569                                 mc_filter[regidx] |= (1 << bit);
12570                         }
12571
12572                         for (i = 0; i < MC_HASH_SIZE; i++)
12573                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12574                                        mc_filter[i]);
12575                 }
12576         }
12577
12578         bp->rx_mode = rx_mode;
12579         bnx2x_set_storm_rx_mode(bp);
12580 }
12581
12582 /* called with rtnl_lock */
12583 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12584 {
12585         struct sockaddr *addr = p;
12586         struct bnx2x *bp = netdev_priv(dev);
12587
12588         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12589                 return -EINVAL;
12590
12591         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12592         if (netif_running(dev)) {
12593                 if (CHIP_IS_E1(bp))
12594                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12595                 else
12596                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12597         }
12598
12599         return 0;
12600 }
12601
12602 /* called with rtnl_lock */
12603 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12604                            int devad, u16 addr)
12605 {
12606         struct bnx2x *bp = netdev_priv(netdev);
12607         u16 value;
12608         int rc;
12609         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12610
12611         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12612            prtad, devad, addr);
12613
12614         if (prtad != bp->mdio.prtad) {
12615                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12616                    prtad, bp->mdio.prtad);
12617                 return -EINVAL;
12618         }
12619
12620         /* The HW expects different devad if CL22 is used */
12621         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12622
12623         bnx2x_acquire_phy_lock(bp);
12624         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12625                              devad, addr, &value);
12626         bnx2x_release_phy_lock(bp);
12627         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12628
12629         if (!rc)
12630                 rc = value;
12631         return rc;
12632 }
12633
12634 /* called with rtnl_lock */
12635 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12636                             u16 addr, u16 value)
12637 {
12638         struct bnx2x *bp = netdev_priv(netdev);
12639         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12640         int rc;
12641
12642         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12643                            " value 0x%x\n", prtad, devad, addr, value);
12644
12645         if (prtad != bp->mdio.prtad) {
12646                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12647                    prtad, bp->mdio.prtad);
12648                 return -EINVAL;
12649         }
12650
12651         /* The HW expects different devad if CL22 is used */
12652         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12653
12654         bnx2x_acquire_phy_lock(bp);
12655         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12656                               devad, addr, value);
12657         bnx2x_release_phy_lock(bp);
12658         return rc;
12659 }
12660
12661 /* called with rtnl_lock */
12662 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12663 {
12664         struct bnx2x *bp = netdev_priv(dev);
12665         struct mii_ioctl_data *mdio = if_mii(ifr);
12666
12667         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12668            mdio->phy_id, mdio->reg_num, mdio->val_in);
12669
12670         if (!netif_running(dev))
12671                 return -EAGAIN;
12672
12673         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12674 }
12675
12676 /* called with rtnl_lock */
12677 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12678 {
12679         struct bnx2x *bp = netdev_priv(dev);
12680         int rc = 0;
12681
12682         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12683                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12684                 return -EAGAIN;
12685         }
12686
12687         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12688             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12689                 return -EINVAL;
12690
12691         /* This does not race with packet allocation
12692          * because the actual alloc size is
12693          * only updated as part of load
12694          */
12695         dev->mtu = new_mtu;
12696
12697         if (netif_running(dev)) {
12698                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12699                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12700         }
12701
12702         return rc;
12703 }
12704
12705 static void bnx2x_tx_timeout(struct net_device *dev)
12706 {
12707         struct bnx2x *bp = netdev_priv(dev);
12708
12709 #ifdef BNX2X_STOP_ON_ERROR
12710         if (!bp->panic)
12711                 bnx2x_panic();
12712 #endif
12713         /* This allows the netif to be shut down gracefully before resetting */
12714         schedule_delayed_work(&bp->reset_task, 0);
12715 }
12716
12717 #ifdef BCM_VLAN
12718 /* called with rtnl_lock */
12719 static void bnx2x_vlan_rx_register(struct net_device *dev,
12720                                    struct vlan_group *vlgrp)
12721 {
12722         struct bnx2x *bp = netdev_priv(dev);
12723
12724         bp->vlgrp = vlgrp;
12725
12726         /* Set flags according to the required capabilities */
12727         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12728
12729         if (dev->features & NETIF_F_HW_VLAN_TX)
12730                 bp->flags |= HW_VLAN_TX_FLAG;
12731
12732         if (dev->features & NETIF_F_HW_VLAN_RX)
12733                 bp->flags |= HW_VLAN_RX_FLAG;
12734
12735         if (netif_running(dev))
12736                 bnx2x_set_client_config(bp);
12737 }
12738
12739 #endif
12740
12741 #ifdef CONFIG_NET_POLL_CONTROLLER
12742 static void poll_bnx2x(struct net_device *dev)
12743 {
12744         struct bnx2x *bp = netdev_priv(dev);
12745
12746         disable_irq(bp->pdev->irq);
12747         bnx2x_interrupt(bp->pdev->irq, dev);
12748         enable_irq(bp->pdev->irq);
12749 }
12750 #endif
12751
12752 static const struct net_device_ops bnx2x_netdev_ops = {
12753         .ndo_open               = bnx2x_open,
12754         .ndo_stop               = bnx2x_close,
12755         .ndo_start_xmit         = bnx2x_start_xmit,
12756         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12757         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12758         .ndo_validate_addr      = eth_validate_addr,
12759         .ndo_do_ioctl           = bnx2x_ioctl,
12760         .ndo_change_mtu         = bnx2x_change_mtu,
12761         .ndo_tx_timeout         = bnx2x_tx_timeout,
12762 #ifdef BCM_VLAN
12763         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12764 #endif
12765 #ifdef CONFIG_NET_POLL_CONTROLLER
12766         .ndo_poll_controller    = poll_bnx2x,
12767 #endif
12768 };
12769
12770 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12771                                     struct net_device *dev)
12772 {
12773         struct bnx2x *bp;
12774         int rc;
12775
12776         SET_NETDEV_DEV(dev, &pdev->dev);
12777         bp = netdev_priv(dev);
12778
12779         bp->dev = dev;
12780         bp->pdev = pdev;
12781         bp->flags = 0;
12782         bp->func = PCI_FUNC(pdev->devfn);
12783
12784         rc = pci_enable_device(pdev);
12785         if (rc) {
12786                 pr_err("Cannot enable PCI device, aborting\n");
12787                 goto err_out;
12788         }
12789
12790         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12791                 pr_err("Cannot find PCI device base address, aborting\n");
12792                 rc = -ENODEV;
12793                 goto err_out_disable;
12794         }
12795
12796         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12797                 pr_err("Cannot find second PCI device base address, aborting\n");
12798                 rc = -ENODEV;
12799                 goto err_out_disable;
12800         }
12801
12802         if (atomic_read(&pdev->enable_cnt) == 1) {
12803                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12804                 if (rc) {
12805                         pr_err("Cannot obtain PCI resources, aborting\n");
12806                         goto err_out_disable;
12807                 }
12808
12809                 pci_set_master(pdev);
12810                 pci_save_state(pdev);
12811         }
12812
12813         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12814         if (bp->pm_cap == 0) {
12815                 pr_err("Cannot find power management capability, aborting\n");
12816                 rc = -EIO;
12817                 goto err_out_release;
12818         }
12819
12820         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12821         if (bp->pcie_cap == 0) {
12822                 pr_err("Cannot find PCI Express capability, aborting\n");
12823                 rc = -EIO;
12824                 goto err_out_release;
12825         }
12826
12827         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12828                 bp->flags |= USING_DAC_FLAG;
12829                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12830                         pr_err("dma_set_coherent_mask failed, aborting\n");
12831                         rc = -EIO;
12832                         goto err_out_release;
12833                 }
12834
12835         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12836                 pr_err("System does not support DMA, aborting\n");
12837                 rc = -EIO;
12838                 goto err_out_release;
12839         }
12840
12841         dev->mem_start = pci_resource_start(pdev, 0);
12842         dev->base_addr = dev->mem_start;
12843         dev->mem_end = pci_resource_end(pdev, 0);
12844
12845         dev->irq = pdev->irq;
12846
12847         bp->regview = pci_ioremap_bar(pdev, 0);
12848         if (!bp->regview) {
12849                 pr_err("Cannot map register space, aborting\n");
12850                 rc = -ENOMEM;
12851                 goto err_out_release;
12852         }
12853
12854         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12855                                         min_t(u64, BNX2X_DB_SIZE,
12856                                               pci_resource_len(pdev, 2)));
12857         if (!bp->doorbells) {
12858                 pr_err("Cannot map doorbell space, aborting\n");
12859                 rc = -ENOMEM;
12860                 goto err_out_unmap;
12861         }
12862
12863         bnx2x_set_power_state(bp, PCI_D0);
12864
12865         /* clean indirect addresses */
12866         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12867                                PCICFG_VENDOR_ID_OFFSET);
12868         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12869         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12870         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12871         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12872
12873         /* Reset the load counter */
12874         bnx2x_clear_load_cnt(bp);
12875
12876         dev->watchdog_timeo = TX_TIMEOUT;
12877
12878         dev->netdev_ops = &bnx2x_netdev_ops;
12879         dev->ethtool_ops = &bnx2x_ethtool_ops;
12880         dev->features |= NETIF_F_SG;
12881         dev->features |= NETIF_F_HW_CSUM;
12882         if (bp->flags & USING_DAC_FLAG)
12883                 dev->features |= NETIF_F_HIGHDMA;
12884         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12885         dev->features |= NETIF_F_TSO6;
12886 #ifdef BCM_VLAN
12887         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
12888         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12889
12890         dev->vlan_features |= NETIF_F_SG;
12891         dev->vlan_features |= NETIF_F_HW_CSUM;
12892         if (bp->flags & USING_DAC_FLAG)
12893                 dev->vlan_features |= NETIF_F_HIGHDMA;
12894         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12895         dev->vlan_features |= NETIF_F_TSO6;
12896 #endif
12897
12898         /* get_port_hwinfo() will set prtad and mmds properly */
12899         bp->mdio.prtad = MDIO_PRTAD_NONE;
12900         bp->mdio.mmds = 0;
12901         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12902         bp->mdio.dev = dev;
12903         bp->mdio.mdio_read = bnx2x_mdio_read;
12904         bp->mdio.mdio_write = bnx2x_mdio_write;
12905
12906         return 0;
12907
12908 err_out_unmap:
12909         if (bp->regview) {
12910                 iounmap(bp->regview);
12911                 bp->regview = NULL;
12912         }
12913         if (bp->doorbells) {
12914                 iounmap(bp->doorbells);
12915                 bp->doorbells = NULL;
12916         }
12917
12918 err_out_release:
12919         if (atomic_read(&pdev->enable_cnt) == 1)
12920                 pci_release_regions(pdev);
12921
12922 err_out_disable:
12923         pci_disable_device(pdev);
12924         pci_set_drvdata(pdev, NULL);
12925
12926 err_out:
12927         return rc;
12928 }
12929
12930 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
12931                                                  int *width, int *speed)
12932 {
12933         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
12934
12935         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12936
12937         /* return value of 1=2.5GHz 2=5GHz */
12938         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12939 }
12940
12941 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12942 {
12943         const struct firmware *firmware = bp->firmware;
12944         struct bnx2x_fw_file_hdr *fw_hdr;
12945         struct bnx2x_fw_file_section *sections;
12946         u32 offset, len, num_ops;
12947         u16 *ops_offsets;
12948         int i;
12949         const u8 *fw_ver;
12950
12951         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12952                 return -EINVAL;
12953
12954         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12955         sections = (struct bnx2x_fw_file_section *)fw_hdr;
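        /* the file header is itself laid out as an array of {offset, len}
         * sections, which lets the bounds check below walk it generically */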
12956
12957         /* Make sure none of the offsets and sizes make us read beyond
12958          * the end of the firmware data */
12959         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12960                 offset = be32_to_cpu(sections[i].offset);
12961                 len = be32_to_cpu(sections[i].len);
12962                 if (offset + len > firmware->size) {
12963                         pr_err("Section %d length is out of bounds\n", i);
12964                         return -EINVAL;
12965                 }
12966         }
12967
12968         /* Likewise for the init_ops offsets */
12969         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12970         ops_offsets = (u16 *)(firmware->data + offset);
12971         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12972
12973         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12974                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12975                         pr_err("Section offset %d is out of bounds\n", i);
12976                         return -EINVAL;
12977                 }
12978         }
12979
12980         /* Check FW version */
12981         offset = be32_to_cpu(fw_hdr->fw_version.offset);
12982         fw_ver = firmware->data + offset;
12983         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12984             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12985             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12986             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12987                 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12988                        fw_ver[0], fw_ver[1], fw_ver[2],
12989                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12990                        BCM_5710_FW_MINOR_VERSION,
12991                        BCM_5710_FW_REVISION_VERSION,
12992                        BCM_5710_FW_ENGINEERING_VERSION);
12993                 return -EINVAL;
12994         }
12995
12996         return 0;
12997 }
12998
12999 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13000 {
13001         const __be32 *source = (const __be32 *)_source;
13002         u32 *target = (u32 *)_target;
13003         u32 i;
13004
13005         for (i = 0; i < n/4; i++)
13006                 target[i] = be32_to_cpu(source[i]);
13007 }
13008
13009 /*
13010    Ops array is stored in the following format:
13011    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13012  */
13013 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13014 {
13015         const __be32 *source = (const __be32 *)_source;
13016         struct raw_op *target = (struct raw_op *)_target;
13017         u32 i, j, tmp;
13018
13019         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13020                 tmp = be32_to_cpu(source[j]);
13021                 target[i].op = (tmp >> 24) & 0xff;
13022                 target[i].offset =  tmp & 0xffffff;
13023                 target[i].raw_data = be32_to_cpu(source[j+1]);
13024         }
13025 }
13026
13027 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13028 {
13029         const __be16 *source = (const __be16 *)_source;
13030         u16 *target = (u16 *)_target;
13031         u32 i;
13032
13033         for (i = 0; i < n/2; i++)
13034                 target[i] = be16_to_cpu(source[i]);
13035 }
13036
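/* Allocate bp->arr and fill it from the firmware image, converting
 * endianness with func(); expects fw_hdr and bp in scope at the call
 * site and jumps to lbl on allocation failure.
 */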
13037 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13038 do {                                                                    \
13039         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13040         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13041         if (!bp->arr) {                                                 \
13042                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13043                 goto lbl;                                               \
13044         }                                                               \
13045         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13046              (u8 *)bp->arr, len);                                       \
13047 } while (0)
13048
13049 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13050 {
13051         const char *fw_file_name;
13052         struct bnx2x_fw_file_hdr *fw_hdr;
13053         int rc;
13054
13055         if (CHIP_IS_E1(bp))
13056                 fw_file_name = FW_FILE_NAME_E1;
13057         else
13058                 fw_file_name = FW_FILE_NAME_E1H;
13059
13060         pr_info("Loading %s\n", fw_file_name);
13061
13062         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13063         if (rc) {
13064                 pr_err("Can't load firmware file %s\n", fw_file_name);
13065                 goto request_firmware_exit;
13066         }
13067
13068         rc = bnx2x_check_firmware(bp);
13069         if (rc) {
13070                 pr_err("Corrupt firmware file %s\n", fw_file_name);
13071                 goto request_firmware_exit;
13072         }
13073
13074         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13075
13076         /* Initialize the pointers to the init arrays */
13077         /* Blob */
13078         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13079
13080         /* Opcodes */
13081         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13082
13083         /* Offsets */
13084         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13085                             be16_to_cpu_n);
13086
13087         /* STORMs firmware */
13088         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13089                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13090         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13091                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13092         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13093                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13094         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13095                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13096         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13097                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13098         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13099                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13100         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13101                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13102         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13103                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13104
13105         return 0;
13106
13107 init_offsets_alloc_err:
13108         kfree(bp->init_ops);
13109 init_ops_alloc_err:
13110         kfree(bp->init_data);
13111 request_firmware_exit:
13112         release_firmware(bp->firmware);
13113
13114         return rc;
13115 }
13116
13117
13118 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13119                                     const struct pci_device_id *ent)
13120 {
13121         struct net_device *dev = NULL;
13122         struct bnx2x *bp;
13123         int pcie_width, pcie_speed;
13124         int rc;
13125
13126         /* dev is zeroed in alloc_etherdev_mq() */
13127         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13128         if (!dev) {
13129                 pr_err("Cannot allocate net device\n");
13130                 return -ENOMEM;
13131         }
13132
13133         bp = netdev_priv(dev);
13134         bp->msg_enable = debug;
13135
13136         pci_set_drvdata(pdev, dev);
13137
13138         rc = bnx2x_init_dev(pdev, dev);
13139         if (rc < 0) {
13140                 free_netdev(dev);
13141                 return rc;
13142         }
13143
13144         rc = bnx2x_init_bp(bp);
13145         if (rc)
13146                 goto init_one_exit;
13147
13148         /* Set init arrays */
13149         rc = bnx2x_init_firmware(bp, &pdev->dev);
13150         if (rc) {
13151                 pr_err("Error loading firmware\n");
13152                 goto init_one_exit;
13153         }
13154
13155         rc = register_netdev(dev);
13156         if (rc) {
13157                 dev_err(&pdev->dev, "Cannot register net device\n");
13158                 goto init_one_exit;
13159         }
13160
13161         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13162         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13163                     board_info[ent->driver_data].name,
13164                     (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13165                     pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13166                     dev->base_addr, bp->pdev->irq, dev->dev_addr);
13167
13168         return 0;
13169
13170 init_one_exit:
13171         if (bp->regview)
13172                 iounmap(bp->regview);
13173
13174         if (bp->doorbells)
13175                 iounmap(bp->doorbells);
13176
13177         free_netdev(dev);
13178
13179         if (atomic_read(&pdev->enable_cnt) == 1)
13180                 pci_release_regions(pdev);
13181
13182         pci_disable_device(pdev);
13183         pci_set_drvdata(pdev, NULL);
13184
13185         return rc;
13186 }
13187
13188 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13189 {
13190         struct net_device *dev = pci_get_drvdata(pdev);
13191         struct bnx2x *bp;
13192
13193         if (!dev) {
13194                 pr_err("BAD net device from bnx2x_init_one\n");
13195                 return;
13196         }
13197         bp = netdev_priv(dev);
13198
13199         unregister_netdev(dev);
13200
13201         /* Make sure RESET task is not scheduled before continuing */
13202         cancel_delayed_work_sync(&bp->reset_task);
13203
13204         kfree(bp->init_ops_offsets);
13205         kfree(bp->init_ops);
13206         kfree(bp->init_data);
13207         release_firmware(bp->firmware);
13208
13209         if (bp->regview)
13210                 iounmap(bp->regview);
13211
13212         if (bp->doorbells)
13213                 iounmap(bp->doorbells);
13214
13215         free_netdev(dev);
13216
13217         if (atomic_read(&pdev->enable_cnt) == 1)
13218                 pci_release_regions(pdev);
13219
13220         pci_disable_device(pdev);
13221         pci_set_drvdata(pdev, NULL);
13222 }
13223
13224 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13225 {
13226         struct net_device *dev = pci_get_drvdata(pdev);
13227         struct bnx2x *bp;
13228
13229         if (!dev) {
13230                 pr_err("BAD net device from bnx2x_init_one\n");
13231                 return -ENODEV;
13232         }
13233         bp = netdev_priv(dev);
13234
13235         rtnl_lock();
13236
13237         pci_save_state(pdev);
13238
13239         if (!netif_running(dev)) {
13240                 rtnl_unlock();
13241                 return 0;
13242         }
13243
13244         netif_device_detach(dev);
13245
13246         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13247
13248         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13249
13250         rtnl_unlock();
13251
13252         return 0;
13253 }
13254
13255 static int bnx2x_resume(struct pci_dev *pdev)
13256 {
13257         struct net_device *dev = pci_get_drvdata(pdev);
13258         struct bnx2x *bp;
13259         int rc;
13260
13261         if (!dev) {
13262                 pr_err("BAD net device from bnx2x_init_one\n");
13263                 return -ENODEV;
13264         }
13265         bp = netdev_priv(dev);
13266
13267         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13268                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13269                 return -EAGAIN;
13270         }
13271
13272         rtnl_lock();
13273
13274         pci_restore_state(pdev);
13275
13276         if (!netif_running(dev)) {
13277                 rtnl_unlock();
13278                 return 0;
13279         }
13280
13281         bnx2x_set_power_state(bp, PCI_D0);
13282         netif_device_attach(dev);
13283
13284         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13285
13286         rtnl_unlock();
13287
13288         return rc;
13289 }
13290
13291 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13292 {
13293         int i;
13294
13295         bp->state = BNX2X_STATE_ERROR;
13296
13297         bp->rx_mode = BNX2X_RX_MODE_NONE;
13298
13299         bnx2x_netif_stop(bp, 0);
13300
13301         del_timer_sync(&bp->timer);
13302         bp->stats_state = STATS_STATE_DISABLED;
13303         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13304
13305         /* Release IRQs */
13306         bnx2x_free_irq(bp, false);
13307
13308         if (CHIP_IS_E1(bp)) {
13309                 struct mac_configuration_cmd *config =
13310                                                 bnx2x_sp(bp, mcast_config);
13311
13312                 for (i = 0; i < config->hdr.length; i++)
13313                         CAM_INVALIDATE(config->config_table[i]);
13314         }
13315
13316         /* Free SKBs, SGEs, TPA pool and driver internals */
13317         bnx2x_free_skbs(bp);
13318         for_each_queue(bp, i)
13319                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13320         for_each_queue(bp, i)
13321                 netif_napi_del(&bnx2x_fp(bp, i, napi));
13322         bnx2x_free_mem(bp);
13323
13324         bp->state = BNX2X_STATE_CLOSED;
13325
13326         netif_carrier_off(bp->dev);
13327
13328         return 0;
13329 }
13330
13331 static void bnx2x_eeh_recover(struct bnx2x *bp)
13332 {
13333         u32 val;
13334
13335         mutex_init(&bp->port.phy_mutex);
13336
13337         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13338         bp->link_params.shmem_base = bp->common.shmem_base;
13339         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13340
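        /* a usable shmem base must lie inside the 0xA0000-0xC0000 window;
         * anything else means the MCP is not active */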
13341         if (!bp->common.shmem_base ||
13342             (bp->common.shmem_base < 0xA0000) ||
13343             (bp->common.shmem_base >= 0xC0000)) {
13344                 BNX2X_DEV_INFO("MCP not active\n");
13345                 bp->flags |= NO_MCP_FLAG;
13346                 return;
13347         }
13348
13349         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13350         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13351                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13352                 BNX2X_ERR("BAD MCP validity signature\n");
13353
13354         if (!BP_NOMCP(bp)) {
13355                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13356                               & DRV_MSG_SEQ_NUMBER_MASK);
13357                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13358         }
13359 }
13360
13361 /**
13362  * bnx2x_io_error_detected - called when PCI error is detected
13363  * @pdev: Pointer to PCI device
13364  * @state: The current pci connection state
13365  *
13366  * This function is called after a PCI bus error affecting
13367  * this device has been detected.
13368  */
13369 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13370                                                 pci_channel_state_t state)
13371 {
13372         struct net_device *dev = pci_get_drvdata(pdev);
13373         struct bnx2x *bp = netdev_priv(dev);
13374
13375         rtnl_lock();
13376
13377         netif_device_detach(dev);
13378
13379         if (state == pci_channel_io_perm_failure) {
13380                 rtnl_unlock();
13381                 return PCI_ERS_RESULT_DISCONNECT;
13382         }
13383
13384         if (netif_running(dev))
13385                 bnx2x_eeh_nic_unload(bp);
13386
13387         pci_disable_device(pdev);
13388
13389         rtnl_unlock();
13390
13391         /* Request a slot reset */
13392         return PCI_ERS_RESULT_NEED_RESET;
13393 }
13394
13395 /**
13396  * bnx2x_io_slot_reset - called after the PCI bus has been reset
13397  * @pdev: Pointer to PCI device
13398  *
13399  * Restart the card from scratch, as if from a cold-boot.
13400  */
13401 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13402 {
13403         struct net_device *dev = pci_get_drvdata(pdev);
13404         struct bnx2x *bp = netdev_priv(dev);
13405
13406         rtnl_lock();
13407
13408         if (pci_enable_device(pdev)) {
13409                 dev_err(&pdev->dev,
13410                         "Cannot re-enable PCI device after reset\n");
13411                 rtnl_unlock();
13412                 return PCI_ERS_RESULT_DISCONNECT;
13413         }
13414
13415         pci_set_master(pdev);
13416         pci_restore_state(pdev);
13417
13418         if (netif_running(dev))
13419                 bnx2x_set_power_state(bp, PCI_D0);
13420
13421         rtnl_unlock();
13422
13423         return PCI_ERS_RESULT_RECOVERED;
13424 }
13425
13426 /**
13427  * bnx2x_io_resume - called when traffic can start flowing again
13428  * @pdev: Pointer to PCI device
13429  *
13430  * This callback is called when the error recovery driver tells us that
13431  * it's OK to resume normal operation.
13432  */
13433 static void bnx2x_io_resume(struct pci_dev *pdev)
13434 {
13435         struct net_device *dev = pci_get_drvdata(pdev);
13436         struct bnx2x *bp = netdev_priv(dev);
13437
13438         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13439                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13440                 return;
13441         }
13442
13443         rtnl_lock();
13444
13445         bnx2x_eeh_recover(bp);
13446
13447         if (netif_running(dev))
13448                 bnx2x_nic_load(bp, LOAD_NORMAL);
13449
13450         netif_device_attach(dev);
13451
13452         rtnl_unlock();
13453 }
13454
13455 static struct pci_error_handlers bnx2x_err_handler = {
13456         .error_detected = bnx2x_io_error_detected,
13457         .slot_reset     = bnx2x_io_slot_reset,
13458         .resume         = bnx2x_io_resume,
13459 };
13460
13461 static struct pci_driver bnx2x_pci_driver = {
13462         .name        = DRV_MODULE_NAME,
13463         .id_table    = bnx2x_pci_tbl,
13464         .probe       = bnx2x_init_one,
13465         .remove      = __devexit_p(bnx2x_remove_one),
13466         .suspend     = bnx2x_suspend,
13467         .resume      = bnx2x_resume,
13468         .err_handler = &bnx2x_err_handler,
13469 };
13470
13471 static int __init bnx2x_init(void)
13472 {
13473         int ret;
13474
13475         pr_info("%s", version);
13476
13477         bnx2x_wq = create_singlethread_workqueue("bnx2x");
13478         if (bnx2x_wq == NULL) {
13479                 pr_err("Cannot create workqueue\n");
13480                 return -ENOMEM;
13481         }
13482
13483         ret = pci_register_driver(&bnx2x_pci_driver);
13484         if (ret) {
13485                 pr_err("Cannot register driver\n");
13486                 destroy_workqueue(bnx2x_wq);
13487         }
13488         return ret;
13489 }
13490
13491 static void __exit bnx2x_cleanup(void)
13492 {
13493         pci_unregister_driver(&bnx2x_pci_driver);
13494
13495         destroy_workqueue(bnx2x_wq);
13496 }
13497
13498 module_init(bnx2x_init);
13499 module_exit(bnx2x_cleanup);
13500
13501 #ifdef BCM_CNIC
13502
13503 /* count denotes the number of new completions we have seen */
13504 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13505 {
13506         struct eth_spe *spe;
13507
13508 #ifdef BNX2X_STOP_ON_ERROR
13509         if (unlikely(bp->panic))
13510                 return;
13511 #endif
13512
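        /* Under spq_lock: retire `count` completed entries, then move as
         * many queued KWQEs as the slow-path queue can accept right now. */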
13513         spin_lock_bh(&bp->spq_lock);
13514         bp->cnic_spq_pending -= count;
13515
13516         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13517              bp->cnic_spq_pending++) {
13518
13519                 if (!bp->cnic_kwq_pending)
13520                         break;
13521
13522                 spe = bnx2x_sp_get_next(bp);
13523                 *spe = *bp->cnic_kwq_cons;
13524
13525                 bp->cnic_kwq_pending--;
13526
13527                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13528                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13529
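                /* wrap the consumer back to the start of the ring */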
13530                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13531                         bp->cnic_kwq_cons = bp->cnic_kwq;
13532                 else
13533                         bp->cnic_kwq_cons++;
13534         }
13535         bnx2x_sp_prod_update(bp);
13536         spin_unlock_bh(&bp->spq_lock);
13537 }
13538
13539 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13540                                struct kwqe_16 *kwqes[], u32 count)
13541 {
13542         struct bnx2x *bp = netdev_priv(dev);
13543         int i;
13544
13545 #ifdef BNX2X_STOP_ON_ERROR
13546         if (unlikely(bp->panic))
13547                 return -EIO;
13548 #endif
13549
13550         spin_lock_bh(&bp->spq_lock);
13551
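        /* Copy each 16-byte work queue entry into the software ring,
         * stopping early if the ring is full; return how many were taken. */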
13552         for (i = 0; i < count; i++) {
13553                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13554
13555                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13556                         break;
13557
13558                 *bp->cnic_kwq_prod = *spe;
13559
13560                 bp->cnic_kwq_pending++;
13561
13562                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13563                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
13564                    spe->data.mac_config_addr.hi,
13565                    spe->data.mac_config_addr.lo,
13566                    bp->cnic_kwq_pending);
13567
13568                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13569                         bp->cnic_kwq_prod = bp->cnic_kwq;
13570                 else
13571                         bp->cnic_kwq_prod++;
13572         }
13573
13574         spin_unlock_bh(&bp->spq_lock);
13575
13576         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13577                 bnx2x_cnic_sp_post(bp, 0);
13578
13579         return i;
13580 }
13581
13582 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13583 {
13584         struct cnic_ops *c_ops;
13585         int rc = 0;
13586
13587         mutex_lock(&bp->cnic_mutex);
13588         c_ops = bp->cnic_ops;
13589         if (c_ops)
13590                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13591         mutex_unlock(&bp->cnic_mutex);
13592
13593         return rc;
13594 }
13595
13596 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13597 {
13598         struct cnic_ops *c_ops;
13599         int rc = 0;
13600
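        /* Bottom-half context cannot take cnic_mutex, so the ops pointer is
         * sampled under RCU instead (see synchronize_rcu() on unregister). */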
13601         rcu_read_lock();
13602         c_ops = rcu_dereference(bp->cnic_ops);
13603         if (c_ops)
13604                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13605         rcu_read_unlock();
13606
13607         return rc;
13608 }
13609
13610 /*
13611  * for CNIC control commands that carry no data
13612  */
13613 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13614 {
13615         struct cnic_ctl_info ctl = {0};
13616
13617         ctl.cmd = cmd;
13618
13619         return bnx2x_cnic_ctl_send(bp, &ctl);
13620 }
13621
13622 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13623 {
13624         struct cnic_ctl_info ctl;
13625
13626         /* first we tell CNIC and only then we count this as a completion */
13627         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13628         ctl.data.comp.cid = cid;
13629
13630         bnx2x_cnic_ctl_send_bh(bp, &ctl);
13631         bnx2x_cnic_sp_post(bp, 1);
13632 }
13633
13634 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13635 {
13636         struct bnx2x *bp = netdev_priv(dev);
13637         int rc = 0;
13638
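        /* Dispatch control requests issued by the CNIC module back into
         * this driver. */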
13639         switch (ctl->cmd) {
13640         case DRV_CTL_CTXTBL_WR_CMD: {
13641                 u32 index = ctl->data.io.offset;
13642                 dma_addr_t addr = ctl->data.io.dma_addr;
13643
13644                 bnx2x_ilt_wr(bp, index, addr);
13645                 break;
13646         }
13647
13648         case DRV_CTL_COMPLETION_CMD: {
13649                 int count = ctl->data.comp.comp_count;
13650
13651                 bnx2x_cnic_sp_post(bp, count);
13652                 break;
13653         }
13654
13655         /* rtnl_lock is held.  */
13656         case DRV_CTL_START_L2_CMD: {
13657                 u32 cli = ctl->data.ring.client_id;
13658
13659                 bp->rx_mode_cl_mask |= (1 << cli);
13660                 bnx2x_set_storm_rx_mode(bp);
13661                 break;
13662         }
13663
13664         /* rtnl_lock is held.  */
13665         case DRV_CTL_STOP_L2_CMD: {
13666                 u32 cli = ctl->data.ring.client_id;
13667
13668                 bp->rx_mode_cl_mask &= ~(1 << cli);
13669                 bnx2x_set_storm_rx_mode(bp);
13670                 break;
13671         }
13672
13673         default:
13674                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13675                 rc = -EINVAL;
13676         }
13677
13678         return rc;
13679 }
13680
13681 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13682 {
13683         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13684
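        /* With MSI-X, CNIC gets its own vector (entry 1 of the driver's
         * MSI-X table); otherwise it shares the device's single interrupt. */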
13685         if (bp->flags & USING_MSIX_FLAG) {
13686                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13687                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13688                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13689         } else {
13690                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13691                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13692         }
13693         cp->irq_arr[0].status_blk = bp->cnic_sb;
13694         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13695         cp->irq_arr[1].status_blk = bp->def_status_blk;
13696         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13697
13698         cp->num_irq = 2;
13699 }
13700
13701 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13702                                void *data)
13703 {
13704         struct bnx2x *bp = netdev_priv(dev);
13705         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13706
13707         if (ops == NULL)
13708                 return -EINVAL;
13709
13710         if (atomic_read(&bp->intr_sem) != 0)
13711                 return -EBUSY;
13712
13713         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13714         if (!bp->cnic_kwq)
13715                 return -ENOMEM;
13716
13717         bp->cnic_kwq_cons = bp->cnic_kwq;
13718         bp->cnic_kwq_prod = bp->cnic_kwq;
13719         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13720
13721         bp->cnic_spq_pending = 0;
13722         bp->cnic_kwq_pending = 0;
13723
13724         bp->cnic_data = data;
13725
13726         cp->num_irq = 0;
13727         cp->drv_state = CNIC_DRV_STATE_REGD;
13728
13729         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13730
13731         bnx2x_setup_cnic_irq_info(bp);
13732         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13733         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
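        /* Publish the ops pointer last: rcu_assign_pointer() guarantees
         * readers never observe a partially initialized registration. */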
13734         rcu_assign_pointer(bp->cnic_ops, ops);
13735
13736         return 0;
13737 }
13738
13739 static int bnx2x_unregister_cnic(struct net_device *dev)
13740 {
13741         struct bnx2x *bp = netdev_priv(dev);
13742         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13743
13744         mutex_lock(&bp->cnic_mutex);
13745         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13746                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13747                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13748         }
13749         cp->drv_state = 0;
13750         rcu_assign_pointer(bp->cnic_ops, NULL);
13751         mutex_unlock(&bp->cnic_mutex);
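        /* wait for in-flight RCU readers of cnic_ops before freeing kwq */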
13752         synchronize_rcu();
13753         kfree(bp->cnic_kwq);
13754         bp->cnic_kwq = NULL;
13755
13756         return 0;
13757 }
13758
13759 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13760 {
13761         struct bnx2x *bp = netdev_priv(dev);
13762         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13763
13764         cp->drv_owner = THIS_MODULE;
13765         cp->chip_id = CHIP_ID(bp);
13766         cp->pdev = bp->pdev;
13767         cp->io_base = bp->regview;
13768         cp->io_base2 = bp->doorbells;
13769         cp->max_kwqe_pending = 8;
13770         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13771         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13772         cp->ctx_tbl_len = CNIC_ILT_LINES;
13773         cp->starting_cid = BCM_CNIC_CID_START;
13774         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13775         cp->drv_ctl = bnx2x_drv_ctl;
13776         cp->drv_register_cnic = bnx2x_register_cnic;
13777         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13778
13779         return cp;
13780 }
13781 EXPORT_SYMBOL(bnx2x_cnic_probe);
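
/*
 * Illustrative sketch, not part of this driver: a CNIC client would
 * typically resolve the exported probe routine at runtime with
 * symbol_get() so that bnx2x remains unloadable while unused.  The
 * function name my_cnic_attach() below is hypothetical.
 */
#if 0	/* example only, never compiled */
static struct cnic_eth_dev *my_cnic_attach(struct net_device *netdev)
{
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *);

	probe = symbol_get(bnx2x_cnic_probe);	/* module-safe lookup */
	if (probe) {
		ethdev = (*probe)(netdev);	/* fills in the ops above */
		symbol_put(bnx2x_cnic_probe);
	}
	return ethdev;	/* NULL when bnx2x does not drive this netdev */
}
#endif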
13782
13783 #endif /* BCM_CNIC */
13784